From f61b1968e07a3ba2399bc814249d6a8ba5e80d39 Mon Sep 17 00:00:00 2001
From: =?utf8?q?Gustavo=20I=C3=B1iguez=20Goya?=
Date: Mon, 6 Mar 2023 12:37:24 +0100
Subject: [PATCH] Import opensnitch_1.5.8.1.orig.tar.gz
[dgit import orig opensnitch_1.5.8.1.orig.tar.gz]
---
.github/FUNDING.yml | 12 +
.github/ISSUE_TEMPLATE/bug_report.md | 52 +
.github/ISSUE_TEMPLATE/config.yml | 4 +
.github/ISSUE_TEMPLATE/feature-request.md | 15 +
.github/workflows/debian-package.yml | 57 +
.github/workflows/ebpf.yml | 48 +
.github/workflows/go.yml | 49 +
.gitignore | 4 +
LICENSE | 674 +++++
Makefile | 47 +
README.md | 24 +
daemon/.gitignore | 2 +
daemon/Gopkg.toml | 19 +
daemon/Makefile | 21 +
daemon/conman/connection.go | 266 ++
daemon/conman/connection_test.go | 127 +
daemon/core/core.go | 68 +
daemon/core/system.go | 23 +
daemon/core/version.go | 9 +
daemon/default-config.json | 17 +
daemon/dns/parse.go | 21 +
daemon/dns/track.go | 99 +
daemon/firewall/common/common.go | 102 +
daemon/firewall/config/config.go | 199 ++
daemon/firewall/iptables/iptables.go | 138 +
daemon/firewall/iptables/monitor.go | 62 +
daemon/firewall/iptables/rules.go | 77 +
daemon/firewall/iptables/system.go | 89 +
daemon/firewall/nftables/monitor.go | 55 +
daemon/firewall/nftables/nftables.go | 141 +
daemon/firewall/nftables/rules.go | 201 ++
daemon/firewall/nftables/system.go | 40 +
daemon/firewall/rules.go | 85 +
daemon/go.mod | 16 +
daemon/log/log.go | 212 ++
daemon/main.go | 415 +++
daemon/netfilter/packet.go | 57 +
daemon/netfilter/queue.c | 2 +
daemon/netfilter/queue.go | 242 ++
daemon/netfilter/queue.h | 113 +
daemon/netlink/socket.go | 153 +
daemon/netlink/socket_linux.go | 264 ++
daemon/netlink/socket_test.go | 116 +
daemon/netstat/entry.go | 32 +
daemon/netstat/find.go | 51 +
daemon/netstat/parse.go | 120 +
daemon/opensnitch.spec | 97 +
daemon/opensnitchd.service | 16 +
daemon/procmon/activepids.go | 89 +
daemon/procmon/activepids_test.go | 104 +
daemon/procmon/audit/client.go | 355 +++
daemon/procmon/audit/parse.go | 298 ++
daemon/procmon/cache.go | 339 +++
daemon/procmon/cache_test.go | 103 +
daemon/procmon/details.go | 197 ++
daemon/procmon/ebpf/cache.go | 118 +
daemon/procmon/ebpf/debug.go | 102 +
daemon/procmon/ebpf/ebpf.go | 188 ++
daemon/procmon/ebpf/find.go | 171 ++
daemon/procmon/ebpf/monitor.go | 127 +
daemon/procmon/ebpf/utils.go | 124 +
daemon/procmon/find.go | 108 +
daemon/procmon/find_test.go | 42 +
daemon/procmon/monitor/init.go | 79 +
daemon/procmon/parse.go | 134 +
daemon/procmon/process.go | 112 +
daemon/procmon/process_test.go | 135 +
daemon/rule/loader.go | 418 +++
daemon/rule/loader_test.go | 275 ++
daemon/rule/operator.go | 297 ++
daemon/rule/operator_lists.go | 263 ++
daemon/rule/operator_test.go | 742 +++++
daemon/rule/rule.go | 115 +
daemon/rule/rule_test.go | 47 +
daemon/rule/testdata/000-allow-chrome.json | 16 +
daemon/rule/testdata/001-deny-chrome.json | 16 +
daemon/rule/testdata/invalid-regexp-list.json | 31 +
daemon/rule/testdata/invalid-regexp.json | 16 +
.../testdata/lists/domains/domainlists.txt | 4 +
daemon/rule/testdata/lists/ips/ips.txt | 7 +
daemon/rule/testdata/lists/nets/nets.txt | 8 +
.../testdata/lists/regexp/domainsregexp.txt | 4 +
.../live_reload/test-live-reload-delete.json | 16 +
.../live_reload/test-live-reload-remove.json | 16 +
daemon/statistics/event.go | 32 +
daemon/statistics/stats.go | 244 ++
daemon/system-fw.json | 14 +
daemon/ui/client.go | 343 +++
daemon/ui/config.go | 118 +
daemon/ui/notifications.go | 304 ++
daemon/ui/protocol/.gitkeep | 0
debian/changelog | 233 ++
debian/control | 95 +
debian/copyright | 32 +
debian/gbp.conf | 2 +
debian/gitlab-ci.yml | 27 +
debian/opensnitch.init | 78 +
debian/opensnitch.install | 3 +
debian/opensnitch.logrotate | 13 +
debian/opensnitch.service | 16 +
debian/python3-opensnitch-ui.postinst | 18 +
debian/python3-opensnitch-ui.postrm | 15 +
debian/rules | 42 +
debian/source/format | 1 +
debian/source/options | 1 +
debian/tests/control | 2 +
debian/tests/test-resources.sh | 13 +
debian/upstream/metadata | 9 +
debian/watch | 4 +
ebpf_prog/Makefile | 159 ++
ebpf_prog/README | 29 +
ebpf_prog/arm-clang-asm-fix.patch | 14 +
ebpf_prog/file.patch | 11 +
ebpf_prog/opensnitch.c | 508 ++++
proto/.gitignore | 1 +
proto/Makefile | 14 +
proto/ui.proto | 129 +
release.sh | 28 +
.../opensnitch-ui-general-tab-deny.png | Bin 0 -> 109267 bytes
screenshots/opensnitch-ui-proc-details.png | Bin 0 -> 76358 bytes
screenshots/screenshot.png | Bin 0 -> 453311 bytes
ui/.gitignore | 5 +
ui/LICENSE | 28 +
ui/MANIFEST.in | 3 +
ui/Makefile | 17 +
ui/bin/opensnitch-ui | 101 +
ui/i18n/Makefile | 37 +
ui/i18n/README.md | 37 +
ui/i18n/generate_i18n.sh | 17 +
ui/i18n/locales/de_DE/opensnitch-de_DE.ts | 2384 ++++++++++++++++
ui/i18n/locales/es_ES/opensnitch-es_ES.ts | 2435 ++++++++++++++++
ui/i18n/locales/eu_ES/opensnitch-eu_ES.ts | 1878 +++++++++++++
ui/i18n/locales/fr_FR/opensnitch-fr_FR.ts | 2420 ++++++++++++++++
ui/i18n/locales/hu_HU/opensnitch-hu_HU.ts | 2256 +++++++++++++++
ui/i18n/locales/ja_JP/opensnitch-ja_JP.ts | 2164 +++++++++++++++
ui/i18n/locales/lt_LT/opensnitch-lt_LT.ts | 2408 ++++++++++++++++
ui/i18n/locales/nb_NO/opensnitch-nb_NO.ts | 2390 ++++++++++++++++
ui/i18n/locales/pt_BR/opensnitch-pt_BR.ts | 2248 +++++++++++++++
ui/i18n/locales/ro_RO/opensnitch-ro_RO.ts | 1732 ++++++++++++
ui/i18n/locales/ru_RU/opensnitch-ru_RU.ts | 2458 ++++++++++++++++
ui/i18n/locales/tr_TR/opensnitch-tr_TR.ts | 2459 +++++++++++++++++
ui/i18n/opensnitch_i18n.pro | 33 +
ui/opensnitch-ui.spec | 110 +
ui/opensnitch/__init__.py | 0
ui/opensnitch/config.py | 159 ++
ui/opensnitch/customwidgets/__init__.py | 0
.../customwidgets/addresstablemodel.py | 69 +
.../customwidgets/generictableview.py | 323 +++
ui/opensnitch/customwidgets/main.py | 535 ++++
ui/opensnitch/database.py | 439 +++
ui/opensnitch/desktop_parser.py | 178 ++
ui/opensnitch/dialogs/__init__.py | 0
ui/opensnitch/dialogs/preferences.py | 553 ++++
ui/opensnitch/dialogs/processdetails.py | 329 +++
ui/opensnitch/dialogs/prompt.py | 588 ++++
ui/opensnitch/dialogs/ruleseditor.py | 810 ++++++
ui/opensnitch/dialogs/stats.py | 2041 ++++++++++++++
ui/opensnitch/nodes.py | 308 +++
ui/opensnitch/notifications.py | 126 +
ui/opensnitch/res/__init__.py | 0
ui/opensnitch/res/icon-alert.png | Bin 0 -> 19598 bytes
ui/opensnitch/res/icon-off.png | Bin 0 -> 20195 bytes
ui/opensnitch/res/icon-pause.png | Bin 0 -> 16530 bytes
ui/opensnitch/res/icon-pause.svg | 109 +
ui/opensnitch/res/icon-red.png | Bin 0 -> 3858 bytes
ui/opensnitch/res/icon-white.png | Bin 0 -> 20760 bytes
ui/opensnitch/res/icon-white.svg | 80 +
ui/opensnitch/res/icon.png | Bin 0 -> 7780 bytes
ui/opensnitch/res/preferences.ui | 1406 ++++++++++
ui/opensnitch/res/process_details.ui | 269 ++
ui/opensnitch/res/prompt.ui | 866 ++++++
ui/opensnitch/res/resources.qrc | 8 +
ui/opensnitch/res/ruleseditor.ui | 946 +++++++
ui/opensnitch/res/stats.ui | 1652 +++++++++++
ui/opensnitch/service.py | 738 +++++
ui/opensnitch/utils.py | 293 ++
ui/opensnitch/version.py | 1 +
ui/requirements.txt | 5 +
ui/resources/icons/48x48/opensnitch-ui.png | Bin 0 -> 1834 bytes
ui/resources/icons/64x64/opensnitch-ui.png | Bin 0 -> 2380 bytes
ui/resources/icons/opensnitch-ui.svg | 95 +
...o.github.evilsocket.opensnitch.appdata.xml | 56 +
ui/resources/kcm_opensnitch.desktop | 9 +
ui/resources/opensnitch_ui.desktop | 18 +
ui/setup.py | 38 +
ui/tests/README.md | 23 +
ui/tests/__init__.py | 0
ui/tests/dialogs/__init__.py | 52 +
ui/tests/dialogs/test_preferences.py | 142 +
ui/tests/dialogs/test_ruleseditor.py | 384 +++
ui/tests/test_nodes.py | 149 +
utils/legacy/make_ads_rules.py | 69 +
utils/scripts/ads/update_adlists.sh | 91 +
193 files changed, 54098 insertions(+)
create mode 100644 .github/FUNDING.yml
create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md
create mode 100644 .github/ISSUE_TEMPLATE/config.yml
create mode 100644 .github/ISSUE_TEMPLATE/feature-request.md
create mode 100644 .github/workflows/debian-package.yml
create mode 100644 .github/workflows/ebpf.yml
create mode 100644 .github/workflows/go.yml
create mode 100644 .gitignore
create mode 100644 LICENSE
create mode 100644 Makefile
create mode 100644 README.md
create mode 100644 daemon/.gitignore
create mode 100644 daemon/Gopkg.toml
create mode 100644 daemon/Makefile
create mode 100644 daemon/conman/connection.go
create mode 100644 daemon/conman/connection_test.go
create mode 100644 daemon/core/core.go
create mode 100644 daemon/core/system.go
create mode 100644 daemon/core/version.go
create mode 100644 daemon/default-config.json
create mode 100644 daemon/dns/parse.go
create mode 100644 daemon/dns/track.go
create mode 100644 daemon/firewall/common/common.go
create mode 100644 daemon/firewall/config/config.go
create mode 100644 daemon/firewall/iptables/iptables.go
create mode 100644 daemon/firewall/iptables/monitor.go
create mode 100644 daemon/firewall/iptables/rules.go
create mode 100644 daemon/firewall/iptables/system.go
create mode 100644 daemon/firewall/nftables/monitor.go
create mode 100644 daemon/firewall/nftables/nftables.go
create mode 100644 daemon/firewall/nftables/rules.go
create mode 100644 daemon/firewall/nftables/system.go
create mode 100644 daemon/firewall/rules.go
create mode 100644 daemon/go.mod
create mode 100644 daemon/log/log.go
create mode 100644 daemon/main.go
create mode 100644 daemon/netfilter/packet.go
create mode 100644 daemon/netfilter/queue.c
create mode 100644 daemon/netfilter/queue.go
create mode 100644 daemon/netfilter/queue.h
create mode 100644 daemon/netlink/socket.go
create mode 100644 daemon/netlink/socket_linux.go
create mode 100644 daemon/netlink/socket_test.go
create mode 100644 daemon/netstat/entry.go
create mode 100644 daemon/netstat/find.go
create mode 100644 daemon/netstat/parse.go
create mode 100644 daemon/opensnitch.spec
create mode 100644 daemon/opensnitchd.service
create mode 100644 daemon/procmon/activepids.go
create mode 100644 daemon/procmon/activepids_test.go
create mode 100644 daemon/procmon/audit/client.go
create mode 100644 daemon/procmon/audit/parse.go
create mode 100644 daemon/procmon/cache.go
create mode 100644 daemon/procmon/cache_test.go
create mode 100644 daemon/procmon/details.go
create mode 100644 daemon/procmon/ebpf/cache.go
create mode 100644 daemon/procmon/ebpf/debug.go
create mode 100644 daemon/procmon/ebpf/ebpf.go
create mode 100644 daemon/procmon/ebpf/find.go
create mode 100644 daemon/procmon/ebpf/monitor.go
create mode 100644 daemon/procmon/ebpf/utils.go
create mode 100644 daemon/procmon/find.go
create mode 100644 daemon/procmon/find_test.go
create mode 100644 daemon/procmon/monitor/init.go
create mode 100644 daemon/procmon/parse.go
create mode 100644 daemon/procmon/process.go
create mode 100644 daemon/procmon/process_test.go
create mode 100644 daemon/rule/loader.go
create mode 100644 daemon/rule/loader_test.go
create mode 100644 daemon/rule/operator.go
create mode 100644 daemon/rule/operator_lists.go
create mode 100644 daemon/rule/operator_test.go
create mode 100644 daemon/rule/rule.go
create mode 100644 daemon/rule/rule_test.go
create mode 100644 daemon/rule/testdata/000-allow-chrome.json
create mode 100644 daemon/rule/testdata/001-deny-chrome.json
create mode 100644 daemon/rule/testdata/invalid-regexp-list.json
create mode 100644 daemon/rule/testdata/invalid-regexp.json
create mode 100644 daemon/rule/testdata/lists/domains/domainlists.txt
create mode 100644 daemon/rule/testdata/lists/ips/ips.txt
create mode 100644 daemon/rule/testdata/lists/nets/nets.txt
create mode 100644 daemon/rule/testdata/lists/regexp/domainsregexp.txt
create mode 100644 daemon/rule/testdata/live_reload/test-live-reload-delete.json
create mode 100644 daemon/rule/testdata/live_reload/test-live-reload-remove.json
create mode 100644 daemon/statistics/event.go
create mode 100644 daemon/statistics/stats.go
create mode 100644 daemon/system-fw.json
create mode 100644 daemon/ui/client.go
create mode 100644 daemon/ui/config.go
create mode 100644 daemon/ui/notifications.go
create mode 100644 daemon/ui/protocol/.gitkeep
create mode 100644 debian/changelog
create mode 100644 debian/control
create mode 100644 debian/copyright
create mode 100644 debian/gbp.conf
create mode 100644 debian/gitlab-ci.yml
create mode 100644 debian/opensnitch.init
create mode 100644 debian/opensnitch.install
create mode 100644 debian/opensnitch.logrotate
create mode 100644 debian/opensnitch.service
create mode 100755 debian/python3-opensnitch-ui.postinst
create mode 100755 debian/python3-opensnitch-ui.postrm
create mode 100755 debian/rules
create mode 100644 debian/source/format
create mode 100644 debian/source/options
create mode 100644 debian/tests/control
create mode 100755 debian/tests/test-resources.sh
create mode 100644 debian/upstream/metadata
create mode 100644 debian/watch
create mode 100644 ebpf_prog/Makefile
create mode 100644 ebpf_prog/README
create mode 100644 ebpf_prog/arm-clang-asm-fix.patch
create mode 100644 ebpf_prog/file.patch
create mode 100644 ebpf_prog/opensnitch.c
create mode 100644 proto/.gitignore
create mode 100644 proto/Makefile
create mode 100644 proto/ui.proto
create mode 100755 release.sh
create mode 100644 screenshots/opensnitch-ui-general-tab-deny.png
create mode 100644 screenshots/opensnitch-ui-proc-details.png
create mode 100644 screenshots/screenshot.png
create mode 100644 ui/.gitignore
create mode 100644 ui/LICENSE
create mode 100644 ui/MANIFEST.in
create mode 100644 ui/Makefile
create mode 100755 ui/bin/opensnitch-ui
create mode 100644 ui/i18n/Makefile
create mode 100644 ui/i18n/README.md
create mode 100755 ui/i18n/generate_i18n.sh
create mode 100644 ui/i18n/locales/de_DE/opensnitch-de_DE.ts
create mode 100644 ui/i18n/locales/es_ES/opensnitch-es_ES.ts
create mode 100644 ui/i18n/locales/eu_ES/opensnitch-eu_ES.ts
create mode 100644 ui/i18n/locales/fr_FR/opensnitch-fr_FR.ts
create mode 100644 ui/i18n/locales/hu_HU/opensnitch-hu_HU.ts
create mode 100644 ui/i18n/locales/ja_JP/opensnitch-ja_JP.ts
create mode 100644 ui/i18n/locales/lt_LT/opensnitch-lt_LT.ts
create mode 100644 ui/i18n/locales/nb_NO/opensnitch-nb_NO.ts
create mode 100644 ui/i18n/locales/pt_BR/opensnitch-pt_BR.ts
create mode 100644 ui/i18n/locales/ro_RO/opensnitch-ro_RO.ts
create mode 100644 ui/i18n/locales/ru_RU/opensnitch-ru_RU.ts
create mode 100644 ui/i18n/locales/tr_TR/opensnitch-tr_TR.ts
create mode 100644 ui/i18n/opensnitch_i18n.pro
create mode 100644 ui/opensnitch-ui.spec
create mode 100644 ui/opensnitch/__init__.py
create mode 100644 ui/opensnitch/config.py
create mode 100644 ui/opensnitch/customwidgets/__init__.py
create mode 100644 ui/opensnitch/customwidgets/addresstablemodel.py
create mode 100644 ui/opensnitch/customwidgets/generictableview.py
create mode 100644 ui/opensnitch/customwidgets/main.py
create mode 100644 ui/opensnitch/database.py
create mode 100644 ui/opensnitch/desktop_parser.py
create mode 100644 ui/opensnitch/dialogs/__init__.py
create mode 100644 ui/opensnitch/dialogs/preferences.py
create mode 100644 ui/opensnitch/dialogs/processdetails.py
create mode 100644 ui/opensnitch/dialogs/prompt.py
create mode 100644 ui/opensnitch/dialogs/ruleseditor.py
create mode 100644 ui/opensnitch/dialogs/stats.py
create mode 100644 ui/opensnitch/nodes.py
create mode 100644 ui/opensnitch/notifications.py
create mode 100644 ui/opensnitch/res/__init__.py
create mode 100644 ui/opensnitch/res/icon-alert.png
create mode 100644 ui/opensnitch/res/icon-off.png
create mode 100644 ui/opensnitch/res/icon-pause.png
create mode 100644 ui/opensnitch/res/icon-pause.svg
create mode 100644 ui/opensnitch/res/icon-red.png
create mode 100644 ui/opensnitch/res/icon-white.png
create mode 100644 ui/opensnitch/res/icon-white.svg
create mode 100644 ui/opensnitch/res/icon.png
create mode 100644 ui/opensnitch/res/preferences.ui
create mode 100644 ui/opensnitch/res/process_details.ui
create mode 100644 ui/opensnitch/res/prompt.ui
create mode 100644 ui/opensnitch/res/resources.qrc
create mode 100644 ui/opensnitch/res/ruleseditor.ui
create mode 100644 ui/opensnitch/res/stats.ui
create mode 100644 ui/opensnitch/service.py
create mode 100644 ui/opensnitch/utils.py
create mode 100644 ui/opensnitch/version.py
create mode 100644 ui/requirements.txt
create mode 100644 ui/resources/icons/48x48/opensnitch-ui.png
create mode 100644 ui/resources/icons/64x64/opensnitch-ui.png
create mode 100644 ui/resources/icons/opensnitch-ui.svg
create mode 100644 ui/resources/io.github.evilsocket.opensnitch.appdata.xml
create mode 100644 ui/resources/kcm_opensnitch.desktop
create mode 100644 ui/resources/opensnitch_ui.desktop
create mode 100644 ui/setup.py
create mode 100644 ui/tests/README.md
create mode 100644 ui/tests/__init__.py
create mode 100644 ui/tests/dialogs/__init__.py
create mode 100644 ui/tests/dialogs/test_preferences.py
create mode 100644 ui/tests/dialogs/test_ruleseditor.py
create mode 100644 ui/tests/test_nodes.py
create mode 100644 utils/legacy/make_ads_rules.py
create mode 100755 utils/scripts/ads/update_adlists.sh
diff --git a/.github/FUNDING.yml b/.github/FUNDING.yml
new file mode 100644
index 0000000..f8e81cc
--- /dev/null
+++ b/.github/FUNDING.yml
@@ -0,0 +1,12 @@
+# These are supported funding model platforms
+
+github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2]
+patreon: evilsocket
+open_collective: # Replace with a single Open Collective username
+ko_fi: # Replace with a single Ko-fi username
+tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel
+community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry
+liberapay: # Replace with a single Liberapay username
+issuehunt: # Replace with a single IssueHunt username
+otechie: # Replace with a single Otechie username
+custom: # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2']
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 0000000..83149d1
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,52 @@
+---
+name: 🐛 Bug report
+about: Create a report to help us improve
+title: ''
+labels: ''
+assignees: ''
+
+---
+
+Please, check the FAQ and Known Problems pages before creating the bug report:
+https://github.com/evilsocket/opensnitch/wiki/FAQs
+https://github.com/evilsocket/opensnitch/wiki/Known-problems
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+Include the following information:
+ - OpenSnitch version.
+ - OS: [e.g. Debian GNU/Linux, ArchLinux, Slackware, ...]
+ - Version [e.g. Buster, 10.3, 20.04]
+ - Window Manager: [e.g. GNOME Shell, KDE, enlightenment, i3wm, ...]
+ - Kernel version: echo $(uname -a)
+
+**To Reproduce**
+Describe in detail as much as you can what happened.
+
+Steps to reproduce the behavior:
+1. Go to '...'
+2. Click on '....'
+3. Scroll down to '....'
+4. See error
+
+**Post error logs:**
+If it's a crash of the GUI:
+ - Launch it from a terminal and reproduce the issue.
+ - Post the errors logged to the terminal.
+
+If the daemon doesn't start:
+ - Post last 15 lines of the log file `/var/log/opensnitchd.log`
+ - Or launch it from a terminal as root (`# /usr/bin/opensnitchd -rules-path /etc/opensnitchd/rules`) and post the errors logged to the terminal.
+
+If the deb or rpm packages fail to install:
+ - Install them from a terminal (`$ sudo dpkg -i opensnitch*` / `$ sudo yum install opensnitch*`), and post the errors logged to stdout.
+
+**Expected behavior (optional)**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem. It may help to understand the issue much better.
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
new file mode 100644
index 0000000..dd3c313
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -0,0 +1,4 @@
+contact_links:
+  - name: 🙏 Question
+ url: https://github.com/evilsocket/opensnitch/discussions/new
+ about: Ask your question here
diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md
new file mode 100644
index 0000000..7e54dce
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/feature-request.md
@@ -0,0 +1,15 @@
+---
+name: 💡 Feature request
+about: Suggest an idea
+title: '[Feature Request] '
+labels: feature
+assignees: ''
+
+---
+
+
+
+### Summary:
+
diff --git a/.github/workflows/debian-package.yml b/.github/workflows/debian-package.yml
new file mode 100644
index 0000000..afd2a36
--- /dev/null
+++ b/.github/workflows/debian-package.yml
@@ -0,0 +1,57 @@
+name: Build status
+on: [push, pull_request]
+jobs:
+
+ Builddeb:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ image: ["debian:bookworm", "debian:sid"]
+ container:
+ image: ${{ matrix.image }}
+ options: --cpus=2
+ steps:
+ - name: Dump GitHub context
+ env:
+ GITHUB_CONTEXT: ${{ toJson(github) }}
+ run: echo "$GITHUB_CONTEXT"
+
+ - name: Check out git code
+ uses: actions/checkout@v2
+
+ - name: Install pre-dependencies
+ env:
+ DEBIAN_FRONTEND: noninteractive
+ run: |
+ set -e
+ set -x
+ apt --quiet update
+ # Install stuff needed to check out the linuxcnc repo and turn it into a debian source package.
+ apt --yes --quiet install --no-install-suggests eatmydata
+ eatmydata apt --yes --quiet install --no-install-suggests git devscripts
+
+ - name: Install build dependencies
+ env:
+ DEBIAN_FRONTEND: noninteractive
+ run: |
+ set -e
+ set -x
+ eatmydata apt --yes --quiet build-dep --indep-only .
+
+ - name: Build source client
+ env:
+ DEBIAN_FRONTEND: noninteractive
+ run: |
+ set -e
+ set -x
+ # Workaround for missing source tarball
+ echo 1.0 > debian/source/format
+ yes y | eatmydata debuild -us -uc
+
+ - name: Test install debian packages
+ env:
+ DEBIAN_FRONTEND: noninteractive
+ run: |
+ set -e
+ set -x
+ eatmydata apt --yes --quiet install ../*.deb
diff --git a/.github/workflows/ebpf.yml b/.github/workflows/ebpf.yml
new file mode 100644
index 0000000..3ee1bbd
--- /dev/null
+++ b/.github/workflows/ebpf.yml
@@ -0,0 +1,48 @@
+name: Build eBPF
+on:
+
+ # Trigger this workflow only when ebpf modules changes.
+ push:
+ paths:
+ - 'ebpf_prog/*'
+ - '.github/workflows/ebpf.yml'
+ pull_request:
+ paths:
+ - 'ebpf_prog/*'
+ - '.github/workflows/ebpf.yml'
+
+ # Allow to run this workflow manually from the Actions tab
+ workflow_dispatch:
+
+jobs:
+
+ build:
+ name: Build eBPF object
+ runs-on: ubuntu-latest
+ steps:
+
+ - name: Check out git code
+ uses: actions/checkout@v2
+
+ - name: Get and prepare dependencies
+ run: |
+ set -e
+ set -x
+ sudo apt install eatmydata
+ sudo eatmydata apt install wget tar patch clang llvm libelf-dev libzip-dev flex bison libssl-dev bc rsync python3 binutils
+ eatmydata wget --no-verbose https://github.com/torvalds/linux/archive/v5.8.tar.gz
+ eatmydata tar -xf v5.8.tar.gz
+
+ - name: Build eBPF module
+ run: |
+ set -e
+ set -x
+ eatmydata patch linux-5.8/tools/lib/bpf/bpf_helpers.h < ebpf_prog/file.patch
+ eatmydata cp ebpf_prog/opensnitch.c ebpf_prog/Makefile linux-5.8/samples/bpf
+ cd linux-5.8 && yes "" | eatmydata make oldconfig
+ eatmydata make prepare
+ eatmydata make headers_install
+ cd samples/bpf
+ eatmydata make
+ eatmydata objdump -h opensnitch.o
+ eatmydata llvm-strip -g opensnitch.o
diff --git a/.github/workflows/go.yml b/.github/workflows/go.yml
new file mode 100644
index 0000000..09e94d7
--- /dev/null
+++ b/.github/workflows/go.yml
@@ -0,0 +1,49 @@
+name: Build status
+on:
+ # Trigger this workflow only when daemon code changes.
+ push:
+ paths:
+ - 'daemon/*'
+ - '.github/workflows/go.yml'
+ pull_request:
+ paths:
+ - 'daemon/*'
+ - '.github/workflows/go.yml'
+
+ # Allow to run this workflow manually from the Actions tab
+ workflow_dispatch:
+
+jobs:
+
+ build:
+ name: Build Go code
+ runs-on: ubuntu-latest
+ steps:
+
+ - name: Set up Go 1.15.15
+ uses: actions/setup-go@v1
+ with:
+ go-version: 1.15.15
+ id: go
+
+ - name: Check out code into the Go module directory
+ uses: actions/checkout@v2
+
+ - name: Get dependencies
+ run: |
+ sudo apt --yes --quiet install --no-install-suggests eatmydata
+ sudo eatmydata apt install git libnetfilter-queue-dev libmnl-dev libpcap-dev protobuf-compiler
+ export GOPATH=~/go
+ export PATH=$PATH:$GOPATH/bin
+ eatmydata go get github.com/golang/protobuf/protoc-gen-go
+ eatmydata go install google.golang.org/protobuf/cmd/protoc-gen-go
+ eatmydata go get google.golang.org/grpc/cmd/protoc-gen-go-grpc
+ cd proto
+ eatmydata make ../daemon/ui/protocol/ui.pb.go
+
+ - name: Build
+ run: |
+ cd daemon
+ eatmydata go mod tidy
+ eatmydata go mod vendor
+ eatmydata go build -v .
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..2697ff8
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,4 @@
+*.sock
+*.pyc
+*.profile
+rules
diff --git a/LICENSE b/LICENSE
new file mode 100644
index 0000000..f288702
--- /dev/null
+++ b/LICENSE
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..ee8fb4a
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,47 @@
# Top-level build orchestration: delegates to the per-component
# Makefiles in proto/, daemon/ and ui/.

# Fix: none of these targets produce a file named after the target, so
# declare them phony — otherwise a file or directory with the same name
# (e.g. a "test" file) would silently prevent the target from running.
.PHONY: all install protocol opensnitch_daemon gui clean run test adblocker

all: protocol opensnitch_daemon gui

install:
	@$(MAKE) -C daemon install
	@$(MAKE) -C ui install

protocol:
	@$(MAKE) -C proto

opensnitch_daemon:
	@$(MAKE) -C daemon

gui:
	@$(MAKE) -C ui

clean:
	@$(MAKE) -C daemon clean
	@$(MAKE) -C proto clean
	@$(MAKE) -C ui clean

# Developer helper: install the UI locally and run daemon+UI against a
# throw-away unix socket, with profiling enabled.
run:
	cd ui && pip3 install --upgrade . && cd ..
	opensnitch-ui --socket unix:///tmp/osui.sock &
	./daemon/opensnitchd -rules-path /etc/opensnitchd/rules -ui-socket unix:///tmp/osui.sock -cpu-profile cpu.profile -mem-profile mem.profile

test:
	clear
	$(MAKE) clean
	clear
	mkdir -p rules
	$(MAKE)
	clear
	$(MAKE) run

adblocker:
	clear
	$(MAKE) clean
	clear
	$(MAKE)
	clear
	python make_ads_rules.py
	clear
	cd ui && pip3 install --upgrade . && cd ..
	opensnitch-ui --socket unix:///tmp/osui.sock &
	./daemon/opensnitchd -rules-path /etc/opensnitchd/rules -ui-socket unix:///tmp/osui.sock

diff --git a/README.md b/README.md
new file mode 100644
index 0000000..68c7b15
--- /dev/null
+++ b/README.md
@@ -0,0 +1,24 @@
+
+
+
+
+
+
+
+
+
+
+
+**OpenSnitch** is a GNU/Linux application firewall.
+
+
+
+
+
+### Installation and configuration
+
+Please refer to [the documentation](https://github.com/evilsocket/opensnitch/wiki) for detailed information.
+
+### Contributors
+
+[See the list](https://github.com/evilsocket/opensnitch/graphs/contributors)
diff --git a/daemon/.gitignore b/daemon/.gitignore
new file mode 100644
index 0000000..ac08621
--- /dev/null
+++ b/daemon/.gitignore
@@ -0,0 +1,2 @@
+opensnitchd
+vendor
diff --git a/daemon/Gopkg.toml b/daemon/Gopkg.toml
new file mode 100644
index 0000000..419b318
--- /dev/null
+++ b/daemon/Gopkg.toml
@@ -0,0 +1,19 @@
+[[constraint]]
+ name = "github.com/fsnotify/fsnotify"
+ version = "1.4.7"
+
+[[constraint]]
+ name = "github.com/google/gopacket"
+ version = "~1.1.14"
+
+[[constraint]]
+ name = "google.golang.org/grpc"
+ version = "~1.11.2"
+
+[[constraint]]
+ name = "github.com/evilsocket/ftrace"
+ version = "~1.2.0"
+
+[prune]
+ go-tests = true
+ unused-packages = true
diff --git a/daemon/Makefile b/daemon/Makefile
new file mode 100644
index 0000000..618e96d
--- /dev/null
+++ b/daemon/Makefile
@@ -0,0 +1,21 @@
# SRC contains all *.go *.c *.h files in daemon/ and its subfolders, so
# the binary is rebuilt whenever any source file changes.
SRC := $(shell find . -type f -name '*.go' -o -name '*.h' -o -name '*.c')

# Fix: "all", "install" and "clean" never produce files of those names;
# mark them phony so stray files cannot shadow them.
.PHONY: all install clean

all: opensnitchd

install:
	@mkdir -p /etc/opensnitchd/rules
	@cp opensnitchd /usr/local/bin/
	@cp opensnitchd.service /etc/systemd/system/
	@cp default-config.json /etc/opensnitchd/
	@cp system-fw.json /etc/opensnitchd/
	@systemctl daemon-reload

opensnitchd: $(SRC)
	@go get
	@go build -o opensnitchd .

clean:
	@rm -rf opensnitchd

diff --git a/daemon/conman/connection.go b/daemon/conman/connection.go
new file mode 100644
index 0000000..16aad41
--- /dev/null
+++ b/daemon/conman/connection.go
@@ -0,0 +1,266 @@
+package conman
+
+import (
+ "errors"
+ "fmt"
+ "net"
+ "os"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/dns"
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/evilsocket/opensnitch/daemon/netfilter"
+ "github.com/evilsocket/opensnitch/daemon/netlink"
+ "github.com/evilsocket/opensnitch/daemon/netstat"
+ "github.com/evilsocket/opensnitch/daemon/procmon"
+ "github.com/evilsocket/opensnitch/daemon/procmon/ebpf"
+ "github.com/evilsocket/opensnitch/daemon/ui/protocol"
+
+ "github.com/google/gopacket/layers"
+)
+
+// Connection represents an outgoing connection.
+type Connection struct {
+ Protocol string
+ SrcIP net.IP
+ SrcPort uint
+ DstIP net.IP
+ DstPort uint
+ DstHost string
+ Entry *netstat.Entry
+ Process *procmon.Process
+
+ pkt *netfilter.Packet
+}
+
+var showUnknownCons = false
+
+// Parse extracts the IP layers from a network packet to determine what
+// process generated a connection.
+func Parse(nfp netfilter.Packet, interceptUnknown bool) *Connection {
+ showUnknownCons = interceptUnknown
+
+ if nfp.IsIPv4() {
+ con, err := NewConnection(&nfp)
+ if err != nil {
+ log.Debug("%s", err)
+ return nil
+ } else if con == nil {
+ return nil
+ }
+ return con
+ }
+
+ if core.IPv6Enabled == false {
+ return nil
+ }
+ con, err := NewConnection6(&nfp)
+ if err != nil {
+ log.Debug("%s", err)
+ return nil
+ } else if con == nil {
+ return nil
+ }
+ return con
+
+}
+
// newConnectionImpl completes a half-built Connection: it parses protocol
// and ports, fills in a netstat entry, resolves the owning PID/UID (eBPF
// first, then netlink/procfs fallbacks) and attaches the Process object.
// protoType is "" for IPv4 and "6" for IPv6 (appended to "tcp"/"udp"/...).
// It returns (nil, nil) when the packet carries no usable transport layer
// or eBPF errored, (nil, err) when the owner could not be determined,
// and (c, nil) on success.
func newConnectionImpl(nfp *netfilter.Packet, c *Connection, protoType string) (cr *Connection, err error) {
	// no errors but not enough info neither
	if c.parseDirection(protoType) == false {
		return nil, nil
	}
	log.Debug("new connection %s => %d:%v -> %v:%d uid: %d", c.Protocol, c.SrcPort, c.SrcIP, c.DstIP, c.DstPort, nfp.UID)

	// UserId/INode start at -1 ("unknown") and are filled in below.
	c.Entry = &netstat.Entry{
		Proto:   c.Protocol,
		SrcIP:   c.SrcIP,
		SrcPort: c.SrcPort,
		DstIP:   c.DstIP,
		DstPort: c.DstPort,
		UserId:  -1,
		INode:   -1,
	}

	pid := -1
	uid := -1
	if procmon.MethodIsEbpf() {
		pid, uid, err = ebpf.GetPid(c.Protocol, c.SrcPort, c.SrcIP, c.DstIP, c.DstPort)
		if err != nil {
			log.Warning("ebpf warning: %v", err)
			return nil, nil
		}
	}
	// sometimes when using eBPF the connection is not found, but falling back to legacy
	// methods helps to find it and avoid "unknown/kernel pop-ups". TODO: investigate
	if pid < 0 {
		// 0. lookup uid and inode via netlink. Can return several inodes.
		// 1. lookup uid and inode using /proc/net/(udp|tcp|udplite)
		// 2. lookup pid by inode
		// 3. if this is coming from us, just accept
		// 4. lookup process info by pid
		var inodeList []int
		uid, inodeList = netlink.GetSocketInfo(c.Protocol, c.SrcIP, c.SrcPort, c.DstIP, c.DstPort)
		if len(inodeList) == 0 {
			// netlink found nothing; fall back to parsing /proc/net/*
			if c.Entry = netstat.FindEntry(c.Protocol, c.SrcIP, c.SrcPort, c.DstIP, c.DstPort); c.Entry == nil {
				return nil, fmt.Errorf("Could not find netstat entry for: %s", c)
			}
			if c.Entry.INode > 0 {
				log.Debug("connection found in netstat: %v", c.Entry)
				inodeList = append([]int{c.Entry.INode}, inodeList...)
			}
		}
		if len(inodeList) == 0 {
			log.Debug("<== no inodes found, applying default action.")
		}

		// first inode that maps to a PID wins
		for n, inode := range inodeList {
			pid = procmon.GetPIDFromINode(inode, fmt.Sprint(inode, c.SrcIP, c.SrcPort, c.DstIP, c.DstPort))
			if pid != -1 {
				log.Debug("[%d] PID found %d [%d]", n, pid, inode)
				c.Entry.INode = inode
				break
			}
		}
	}

	// the UID reported by netfilter (0xffffffff == unset) takes precedence
	// over the one obtained from the eBPF/netlink lookups above
	if nfp.UID != 0xffffffff {
		c.Entry.UserId = int(nfp.UID)
	} else {
		c.Entry.UserId = uid
	}

	if pid == os.Getpid() {
		// return a Process object with our PID, to be able to exclude our own connections
		// (to the UI on a local socket for example)
		c.Process = procmon.NewProcess(pid, "")
		return c, nil
	}

	if c.Process = procmon.FindProcess(pid, showUnknownCons); c.Process == nil {
		return nil, fmt.Errorf("Could not find process by its pid %d for: %s", pid, c)
	}

	return c, nil
}
+
+// NewConnection creates a new Connection object, and returns the details of it.
+func NewConnection(nfp *netfilter.Packet) (c *Connection, err error) {
+ ipv4 := nfp.Packet.Layer(layers.LayerTypeIPv4)
+ if ipv4 == nil {
+ return nil, errors.New("Error getting IPv4 layer")
+ }
+ ip, ok := ipv4.(*layers.IPv4)
+ if !ok {
+ return nil, errors.New("Error getting IPv4 layer data")
+ }
+ c = &Connection{
+ SrcIP: ip.SrcIP,
+ DstIP: ip.DstIP,
+ DstHost: dns.HostOr(ip.DstIP, ""),
+ pkt: nfp,
+ }
+ return newConnectionImpl(nfp, c, "")
+}
+
+// NewConnection6 creates a IPv6 new Connection object, and returns the details of it.
+func NewConnection6(nfp *netfilter.Packet) (c *Connection, err error) {
+ ipv6 := nfp.Packet.Layer(layers.LayerTypeIPv6)
+ if ipv6 == nil {
+ return nil, errors.New("Error getting IPv6 layer")
+ }
+ ip, ok := ipv6.(*layers.IPv6)
+ if !ok {
+ return nil, errors.New("Error getting IPv6 layer data")
+ }
+ c = &Connection{
+ SrcIP: ip.SrcIP,
+ DstIP: ip.DstIP,
+ DstHost: dns.HostOr(ip.DstIP, ""),
+ pkt: nfp,
+ }
+ return newConnectionImpl(nfp, c, "6")
+}
+
+func (c *Connection) parseDirection(protoType string) bool {
+ ret := false
+ if tcpLayer := c.pkt.Packet.Layer(layers.LayerTypeTCP); tcpLayer != nil {
+ if tcp, ok := tcpLayer.(*layers.TCP); ok == true && tcp != nil {
+ c.Protocol = "tcp" + protoType
+ c.DstPort = uint(tcp.DstPort)
+ c.SrcPort = uint(tcp.SrcPort)
+ ret = true
+
+ if tcp.DstPort == 53 {
+ c.getDomains(c.pkt, c)
+ }
+ }
+ } else if udpLayer := c.pkt.Packet.Layer(layers.LayerTypeUDP); udpLayer != nil {
+ if udp, ok := udpLayer.(*layers.UDP); ok == true && udp != nil {
+ c.Protocol = "udp" + protoType
+ c.DstPort = uint(udp.DstPort)
+ c.SrcPort = uint(udp.SrcPort)
+ ret = true
+
+ if udp.DstPort == 53 {
+ c.getDomains(c.pkt, c)
+ }
+ }
+ } else if udpliteLayer := c.pkt.Packet.Layer(layers.LayerTypeUDPLite); udpliteLayer != nil {
+ if udplite, ok := udpliteLayer.(*layers.UDPLite); ok == true && udplite != nil {
+ c.Protocol = "udplite" + protoType
+ c.DstPort = uint(udplite.DstPort)
+ c.SrcPort = uint(udplite.SrcPort)
+ ret = true
+ }
+ }
+
+ return ret
+}
+
+func (c *Connection) getDomains(nfp *netfilter.Packet, con *Connection) {
+ domains := dns.GetQuestions(nfp)
+ if len(domains) > 0 {
+ for _, dns := range domains {
+ con.DstHost = dns
+ }
+ }
+}
+
+// To returns the destination host of a connection.
+func (c *Connection) To() string {
+ if c.DstHost == "" {
+ return c.DstIP.String()
+ }
+ return c.DstHost
+}
+
+func (c *Connection) String() string {
+ if c.Entry == nil {
+ return fmt.Sprintf("%s ->(%s)-> %s:%d", c.SrcIP, c.Protocol, c.To(), c.DstPort)
+ }
+
+ if c.Process == nil {
+ return fmt.Sprintf("%s (uid:%d) ->(%s)-> %s:%d", c.SrcIP, c.Entry.UserId, c.Protocol, c.To(), c.DstPort)
+ }
+
+ return fmt.Sprintf("%s (%d) -> %s:%d (proto:%s uid:%d)", c.Process.Path, c.Process.ID, c.To(), c.DstPort, c.Protocol, c.Entry.UserId)
+}
+
// Serialize converts the connection into its gRPC protocol representation,
// ready to be sent to the UI.
// NOTE(review): unlike String(), this dereferences c.Entry and c.Process
// unconditionally — it assumes both were resolved before serialization;
// confirm callers guarantee that before using it elsewhere.
func (c *Connection) Serialize() *protocol.Connection {
	return &protocol.Connection{
		Protocol:    c.Protocol,
		SrcIp:       c.SrcIP.String(),
		SrcPort:     uint32(c.SrcPort),
		DstIp:       c.DstIP.String(),
		DstHost:     c.DstHost,
		DstPort:     uint32(c.DstPort),
		UserId:      uint32(c.Entry.UserId),
		ProcessId:   uint32(c.Process.ID),
		ProcessPath: c.Process.Path,
		ProcessArgs: c.Process.Args,
		ProcessEnv:  c.Process.Env,
		ProcessCwd:  c.Process.CWD,
	}
}
diff --git a/daemon/conman/connection_test.go b/daemon/conman/connection_test.go
new file mode 100644
index 0000000..6bd4cf6
--- /dev/null
+++ b/daemon/conman/connection_test.go
@@ -0,0 +1,127 @@
+package conman
+
+import (
+ "fmt"
+ "net"
+ "testing"
+
+ "github.com/google/gopacket"
+ "github.com/google/gopacket/layers"
+
+ "github.com/evilsocket/opensnitch/daemon/netfilter"
+)
+
+// Adding new packets:
+// wireshark -> right click -> Copy as HexDump -> create []byte{}
+
// NewTCPPacket returns a gopacket.Packet built from a captured TCP SYN:
// 192.168.1.100:47676 -> 1.1.1.1:23 (hex dump copied from wireshark).
func NewTCPPacket() gopacket.Packet {
	// 47676:192.168.1.100 -> 1.1.1.1:23
	testTCPPacket := []byte{0x4c, 0x6e, 0x6e, 0xd5, 0x79, 0xbf, 0x00, 0x28, 0x9d, 0x43, 0x7f, 0xd7, 0x08, 0x00, 0x45, 0x10,
		0x00, 0x3c, 0x1d, 0x07, 0x40, 0x00, 0x40, 0x06, 0x59, 0x8e, 0xc0, 0xa8, 0x01, 0x6d, 0x01, 0x01,
		0x01, 0x01, 0xba, 0x3c, 0x00, 0x17, 0x47, 0x7e, 0xf3, 0x0b, 0x00, 0x00, 0x00, 0x00, 0xa0, 0x02,
		0xfa, 0xf0, 0x4c, 0x27, 0x00, 0x00, 0x02, 0x04, 0x05, 0xb4, 0x04, 0x02, 0x08, 0x0a, 0x91, 0xfb,
		0xb5, 0xf4, 0x00, 0x00, 0x00, 0x00, 0x01, 0x03, 0x03, 0x0a}
	return gopacket.NewPacket(testTCPPacket, layers.LinkTypeEthernet, gopacket.Default)
}
+
// NewUDPPacket returns a gopacket.Packet built from a captured UDP DNS
// query: 192.168.1.109:29517 -> 1.0.0.1:53 (hex dump from wireshark).
func NewUDPPacket() gopacket.Packet {
	// 29517:192.168.1.109 -> 1.0.0.1:53
	testUDPPacketDNS := []byte{
		0x4c, 0x6e, 0x6e, 0xd5, 0x79, 0xbf, 0x00, 0x28, 0x9d, 0x43, 0x7f, 0xd7, 0x08, 0x00, 0x45, 0x00,
		0x00, 0x40, 0x54, 0x1a, 0x40, 0x00, 0x3f, 0x11, 0x24, 0x7d, 0xc0, 0xa8, 0x01, 0x6d, 0x01, 0x00,
		0x00, 0x01, 0x73, 0x4d, 0x00, 0x35, 0x00, 0x2c, 0xf1, 0x17, 0x05, 0x51, 0x00, 0x20, 0x00, 0x01,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x70, 0x69, 0x04, 0x68, 0x6f, 0x6c, 0x65, 0x00, 0x00,
		0x01, 0x00, 0x01, 0x00, 0x00, 0x29, 0x10, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00,
	}

	return gopacket.NewPacket(testUDPPacketDNS, layers.LinkTypeEthernet, gopacket.Default)
}
+
// EstablishConnection dials dst over the given protocol, printing (and
// returning) any error. Test helper.
func EstablishConnection(proto, dst string) (net.Conn, error) {
	conn, err := net.Dial(proto, dst)
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	return conn, nil
}
+
// ListenOnPort opens a listening socket on the given address, printing
// (and returning) any error. Test helper.
func ListenOnPort(proto, port string) (net.Listener, error) {
	listener, err := net.Listen(proto, port)
	if err != nil {
		fmt.Println(err)
		return nil, err
	}
	return listener, nil
}
+
+func NewPacket(pkt gopacket.Packet) *netfilter.Packet {
+ return &netfilter.Packet{
+ Packet: pkt,
+ UID: 666,
+ NetworkProtocol: netfilter.IPv4,
+ }
+}
+
+func NewDummyConnection(src, dst net.IP) *Connection {
+ return &Connection{
+ SrcIP: src,
+ DstIP: dst,
+ }
+}
+
+// Test TCP parseDirection()
+func TestParseTCPDirection(t *testing.T) {
+ srcIP := net.IP{192, 168, 1, 100}
+ dstIP := net.IP{1, 1, 1, 1}
+ c := NewDummyConnection(srcIP, dstIP)
+ // 47676:192.168.1.100 -> 1.1.1.1:23
+ pkt := NewPacket(NewTCPPacket())
+ c.pkt = pkt
+
+ // parseDirection extracts the src and dst port from a network packet.
+ if c.parseDirection("") == false {
+ t.Error("parseDirection() should not be false")
+ t.Fail()
+ }
+ if c.SrcPort != 47676 {
+ t.Error("parseDirection() SrcPort mismatch:", c)
+ t.Fail()
+ }
+ if c.DstPort != 23 {
+ t.Error("parseDirection() DstPort mismatch:", c)
+ t.Fail()
+ }
+ if c.Protocol != "tcp" {
+ t.Error("parseDirection() Protocol mismatch:", c)
+ t.Fail()
+ }
+}
+
+// Test UDP parseDirection()
+func TestParseUDPDirection(t *testing.T) {
+ srcIP := net.IP{192, 168, 1, 100}
+ dstIP := net.IP{1, 0, 0, 1}
+ c := NewDummyConnection(srcIP, dstIP)
+ // 29517:192.168.1.109 -> 1.0.0.1:53
+ pkt := NewPacket(NewUDPPacket())
+ c.pkt = pkt
+
+ // parseDirection extracts the src and dst port from a network packet.
+ if c.parseDirection("") == false {
+ t.Error("parseDirection() should not be false")
+ t.Fail()
+ }
+ if c.SrcPort != 29517 {
+ t.Error("parseDirection() SrcPort mismatch:", c)
+ t.Fail()
+ }
+ if c.DstPort != 53 {
+ t.Error("parseDirection() DstPort mismatch:", c)
+ t.Fail()
+ }
+ if c.Protocol != "udp" {
+ t.Error("parseDirection() Protocol mismatch:", c)
+ t.Fail()
+ }
+}
diff --git a/daemon/core/core.go b/daemon/core/core.go
new file mode 100644
index 0000000..2d58163
--- /dev/null
+++ b/daemon/core/core.go
@@ -0,0 +1,68 @@
+package core
+
+import (
+ "fmt"
+ "os"
+ "os/exec"
+ "os/user"
+ "path/filepath"
+ "strings"
+ "time"
+)
+
// defaultTrimSet is the set of whitespace characters stripped by Trim.
const defaultTrimSet = "\r\n\t "

// Trim removes leading and trailing whitespace (spaces, tabs, CR, LF)
// from a string.
func Trim(s string) string {
	return strings.Trim(s, defaultTrimSet)
}
+
+// Exec spawns a new process and reurns the output.
+func Exec(executable string, args []string) (string, error) {
+ path, err := exec.LookPath(executable)
+ if err != nil {
+ return "", err
+ }
+
+ raw, err := exec.Command(path, args...).CombinedOutput()
+ if err != nil {
+ return "", err
+ }
+ return Trim(string(raw)), nil
+}
+
// Exists reports whether a path exists on disk.
// Note: any Stat error other than "does not exist" (e.g. permission
// denied) still reports true, matching the original best-effort behavior.
func Exists(path string) bool {
	_, err := os.Stat(path)
	return !os.IsNotExist(err)
}
+
// ExpandPath replaces a leading '~' with the current user's home directory
// and returns the absolute form of the path. An empty input yields an
// empty output with no error.
func ExpandPath(path string) (string, error) {
	if path == "" {
		return "", nil
	}
	if strings.HasPrefix(path, "~") {
		usr, err := user.Current()
		if err != nil {
			return "", err
		}
		// only the first occurrence (the prefix we just checked) is replaced
		path = strings.Replace(path, "~", usr.HomeDir, 1)
	}
	return filepath.Abs(path)
}
+
+// GetFileModTime checks if a file has been modified.
+func GetFileModTime(filepath string) (time.Time, error) {
+ fi, err := os.Stat(filepath)
+ if err != nil || fi.IsDir() {
+ return time.Now(), fmt.Errorf("GetFileModTime() Invalid file")
+ }
+ return fi.ModTime(), nil
+}
diff --git a/daemon/core/system.go b/daemon/core/system.go
new file mode 100644
index 0000000..2bbc93f
--- /dev/null
+++ b/daemon/core/system.go
@@ -0,0 +1,23 @@
+package core
+
+import (
+ "io/ioutil"
+ "strings"
+)
+
+var (
+ // IPv6Enabled indicates if IPv6 protocol is enabled in the system
+ IPv6Enabled = Exists("/proc/sys/net/ipv6")
+)
+
+// GetHostname returns the name of the host where the daemon is running.
+func GetHostname() string {
+ hostname, _ := ioutil.ReadFile("/proc/sys/kernel/hostname")
+ return strings.Replace(string(hostname), "\n", "", -1)
+}
+
// GetKernelVersion returns the version string of the running kernel, read
// from /proc/sys/kernel/version with newlines stripped. (The original
// comment was copy-pasted from GetHostname and described the wrong thing.)
// Read errors are ignored and yield an empty string.
func GetKernelVersion() string {
	version, _ := ioutil.ReadFile("/proc/sys/kernel/version")
	return strings.Replace(string(version), "\n", "", -1)
}
diff --git a/daemon/core/version.go b/daemon/core/version.go
new file mode 100644
index 0000000..04ae9e0
--- /dev/null
+++ b/daemon/core/version.go
@@ -0,0 +1,9 @@
+package core
+
+// version related consts
+const (
+ Name = "opensnitch-daemon"
+ Version = "1.5.8"
+ Author = "Simone 'evilsocket' Margaritelli"
+ Website = "https://github.com/evilsocket/opensnitch"
+)
diff --git a/daemon/default-config.json b/daemon/default-config.json
new file mode 100644
index 0000000..d53e4c1
--- /dev/null
+++ b/daemon/default-config.json
@@ -0,0 +1,17 @@
+{
+ "Server":
+ {
+ "Address":"unix:///tmp/osui.sock",
+ "LogFile":"/var/log/opensnitchd.log"
+ },
+ "DefaultAction": "allow",
+ "DefaultDuration": "once",
+ "InterceptUnknown": false,
+ "ProcMonitorMethod": "ebpf",
+ "LogLevel": 2,
+ "Firewall": "iptables",
+ "Stats": {
+ "MaxEvents": 150,
+ "MaxStats": 25
+ }
+}
diff --git a/daemon/dns/parse.go b/daemon/dns/parse.go
new file mode 100644
index 0000000..971eafe
--- /dev/null
+++ b/daemon/dns/parse.go
@@ -0,0 +1,21 @@
+package dns
+
+import (
+ "github.com/evilsocket/opensnitch/daemon/netfilter"
+ "github.com/google/gopacket/layers"
+)
+
+// GetQuestions retrieves the domain names a process is trying to resolve.
+func GetQuestions(nfp *netfilter.Packet) (questions []string) {
+ dnsLayer := nfp.Packet.Layer(layers.LayerTypeDNS)
+ if dnsLayer == nil {
+ return questions
+ }
+
+ dns, _ := dnsLayer.(*layers.DNS)
+ for _, dnsQuestion := range dns.Questions {
+ questions = append(questions, string(dnsQuestion.Name))
+ }
+
+ return questions
+}
diff --git a/daemon/dns/track.go b/daemon/dns/track.go
new file mode 100644
index 0000000..5739b7d
--- /dev/null
+++ b/daemon/dns/track.go
@@ -0,0 +1,99 @@
+package dns
+
+import (
+ "net"
+ "sync"
+
+ "github.com/evilsocket/opensnitch/daemon/log"
+
+ "github.com/google/gopacket"
+ "github.com/google/gopacket/layers"
+)
+
+var (
+ responses = make(map[string]string, 0)
+ lock = sync.RWMutex{}
+)
+
+// TrackAnswers obtains the resolved domains of a DNS query.
+// If the packet is UDP DNS, the domain names are added to the list of resolved domains.
+func TrackAnswers(packet gopacket.Packet) bool {
+ udpLayer := packet.Layer(layers.LayerTypeUDP)
+ if udpLayer == nil {
+ return false
+ }
+
+ udp, ok := udpLayer.(*layers.UDP)
+ if ok == false || udp == nil {
+ return false
+ }
+ if udp.SrcPort != 53 {
+ return false
+ }
+
+ dnsLayer := packet.Layer(layers.LayerTypeDNS)
+ if dnsLayer == nil {
+ return false
+ }
+
+ dnsAns, ok := dnsLayer.(*layers.DNS)
+ if ok == false || dnsAns == nil {
+ return false
+ }
+
+ for _, ans := range dnsAns.Answers {
+ if ans.Name != nil {
+ if ans.IP != nil {
+ Track(ans.IP.String(), string(ans.Name))
+ } else if ans.CNAME != nil {
+ Track(string(ans.CNAME), string(ans.Name))
+ }
+ }
+ }
+
+ return true
+}
+
+// Track adds a resolved domain to the list.
+func Track(resolved string, hostname string) {
+ lock.Lock()
+ defer lock.Unlock()
+
+ if resolved == "127.0.0.1" || resolved == "::1" {
+ return
+ }
+ responses[resolved] = hostname
+
+ log.Debug("New DNS record: %s -> %s", resolved, hostname)
+}
+
+// Host returns if a resolved domain is in the list.
+func Host(resolved string) (host string, found bool) {
+ lock.RLock()
+ defer lock.RUnlock()
+
+ host, found = responses[resolved]
+ return
+}
+
+// HostOr checks if an IP has a domain name already resolved.
+// If the domain is in the list it's returned, otherwise the IP will be returned.
+func HostOr(ip net.IP, or string) string {
+ if host, found := Host(ip.String()); found == true {
+ // host might have been CNAME; go back until we reach the "root"
+ seen := make(map[string]bool) // prevent possibility of loops
+ for {
+ orig, had := Host(host)
+ if seen[orig] {
+ break
+ }
+ if !had {
+ break
+ }
+ seen[orig] = true
+ host = orig
+ }
+ return host
+ }
+ return or
+}
diff --git a/daemon/firewall/common/common.go b/daemon/firewall/common/common.go
new file mode 100644
index 0000000..d1f2eab
--- /dev/null
+++ b/daemon/firewall/common/common.go
@@ -0,0 +1,102 @@
+package common
+
+import (
+ "sync"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+type (
+ callback func()
+ callbackBool func() bool
+
+ stopChecker struct {
+ sync.RWMutex
+ ch chan bool
+ }
+
+ // Common holds common fields and functionality of both firewalls,
+ // iptables and nftables.
+ Common struct {
+ sync.RWMutex
+ QueueNum uint16
+ Running bool
+ RulesChecker *time.Ticker
+ stopCheckerChan *stopChecker
+ }
+)
+
// exit returns the channel used to tell the rules-checker goroutine to
// stop; guarded by the read lock because stop() may nil it concurrently.
func (s *stopChecker) exit() chan bool {
	s.RLock()
	defer s.RUnlock()
	return s.ch
}
+
// stop signals the checker goroutine to exit, then closes and nils the
// channel so further calls are no-ops.
// NOTE(review): the channel is created with capacity 1, so the send does
// not block when no goroutine is receiving; if stop() could ever run
// twice before the buffer drains it would block while holding the lock —
// confirm callers only invoke it once per checker.
func (s *stopChecker) stop() {
	s.Lock()
	defer s.Unlock()

	if s.ch != nil {
		s.ch <- true
		close(s.ch)
		s.ch = nil
	}
}
+
+// SetQueueNum sets the queue number used by the firewall.
+// It's the queue where all intercepted connections will be sent.
+func (c *Common) SetQueueNum(qNum *int) {
+ c.Lock()
+ defer c.Unlock()
+
+ if qNum != nil {
+ c.QueueNum = uint16(*qNum)
+ }
+
+}
+
+// IsRunning returns if the firewall is running or not.
+func (c *Common) IsRunning() bool {
+ c.RLock()
+ defer c.RUnlock()
+
+ return c != nil && c.Running
+}
+
// NewRulesChecker starts monitoring the firewall for configuration or
// rules changes: every 30 seconds a background goroutine asks
// areRulesLoaded() and calls reloadRules() when our rules are missing.
// It (re)creates the stop channel and ticker under the lock before
// launching the goroutine.
func (c *Common) NewRulesChecker(areRulesLoaded callbackBool, reloadRules callback) {
	c.Lock()
	defer c.Unlock()

	c.stopCheckerChan = &stopChecker{ch: make(chan bool, 1)}
	c.RulesChecker = time.NewTicker(time.Second * 30)

	go c.startCheckingRules(areRulesLoaded, reloadRules)
}
+
+// StartCheckingRules monitors if our rules are loaded.
+// If the rules to intercept traffic are not loaded, we'll try to insert them again.
+func (c *Common) startCheckingRules(areRulesLoaded callbackBool, reloadRules callback) {
+ for {
+ select {
+ case <-c.stopCheckerChan.exit():
+ goto Exit
+ case <-c.RulesChecker.C:
+ if areRulesLoaded() == false {
+ reloadRules()
+ }
+ }
+ }
+
+Exit:
+ log.Info("exit checking iptables rules")
+}
+
+// StopCheckingRules stops checking if firewall rules are loaded.
+func (c *Common) StopCheckingRules() {
+ if c.RulesChecker != nil {
+ c.RulesChecker.Stop()
+ }
+ c.stopCheckerChan.stop()
+}
diff --git a/daemon/firewall/config/config.go b/daemon/firewall/config/config.go
new file mode 100644
index 0000000..5345dcc
--- /dev/null
+++ b/daemon/firewall/config/config.go
@@ -0,0 +1,199 @@
+// Package config provides functionality to load and monitor the system
+// firewall rules.
+// It's inherited by the different firewall packages (iptables, nftables).
+//
+// The firewall rules defined by the user are reloaded in these cases:
+// - When the file system-fw.json changes.
+// - When the firewall rules are not present when listing them.
+//
+package config
+
+import (
+ "encoding/json"
+ "io/ioutil"
+ "sync"
+
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/fsnotify/fsnotify"
+)
+
+type callback func()
+
+// FwRule holds the fields of a rule
+type FwRule struct {
+ sync.RWMutex
+
+ Description string
+ Table string
+ Chain string
+ Parameters string
+ Target string
+ TargetParameters string
+}
+
+type rulesList struct {
+ sync.RWMutex
+
+ Rule *FwRule
+}
+
+// SystemConfig holds the list of rules to be added to the system
+type SystemConfig struct {
+ sync.RWMutex
+ SystemRules []*rulesList
+}
+
+// Config holds the functionality to re/load the firewall configuration from disk.
+// This is the configuration to manage the system firewall (iptables, nftables).
+type Config struct {
+ sync.Mutex
+
+ file string
+ watcher *fsnotify.Watcher
+ monitorExitChan chan bool
+ SysConfig SystemConfig
+
+ // subscribe to this channel to receive config reload events
+ ReloadConfChan chan bool
+
+ // preloadCallback is called before reloading the configuration,
+ // in order to delete old fw rules.
+ preloadCallback callback
+}
+
// NewSystemFwConfig initializes the config fields: the fsnotify watcher,
// the path of the rules file (/etc/opensnitchd/system-fw.json), the
// monitor/reload channels and the callback invoked before each reload to
// clear old rules. Returns the receiver for chaining, or nil plus an
// error when the watcher could not be created.
func (c *Config) NewSystemFwConfig(preLoadCb callback) (*Config, error) {
	var err error
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		log.Warning("Error creating firewall config watcher: %s", err)
		return nil, err
	}

	c.Lock()
	defer c.Unlock()

	c.file = "/etc/opensnitchd/system-fw.json"
	c.monitorExitChan = make(chan bool, 1)
	c.preloadCallback = preLoadCb
	c.watcher = watcher
	c.ReloadConfChan = make(chan bool, 1)
	return c, nil
}
+
// LoadDiskConfiguration reads and loads the firewall configuration from
// disk. With reload == false (initial load) it also starts the fsnotify
// monitor goroutine; with reload == true it instead signals
// ReloadConfChan so MonitorSystemFw re-applies the rules.
func (c *Config) LoadDiskConfiguration(reload bool) {
	c.Lock()
	defer c.Unlock()

	raw, err := ioutil.ReadFile(c.file)
	if err != nil {
		log.Error("Error reading firewall configuration from disk %s: %s", c.file, err)
		return
	}

	c.loadConfiguration(raw)
	// we need to monitor the configuration file for changes, regardless if it's
	// malformed or not.
	// Remove+Add re-arms the watch: some editors replace the file on save,
	// which invalidates the previous watch descriptor.
	c.watcher.Remove(c.file)
	if err := c.watcher.Add(c.file); err != nil {
		log.Error("Could not watch firewall configuration: %s", err)
		return
	}

	if reload {
		c.ReloadConfChan <- true
		return
	}

	go c.monitorConfigWorker()
}
+
// loadConfiguration parses the raw system firewall rules and stores them
// in SysConfig, after invoking preloadCallback to delete the previously
// installed rules.
// NOTE(review): on a parse error the old SysConfig contents are kept and
// "fw configuration loaded" is still logged — confirm that is intended.
func (c *Config) loadConfiguration(rawConfig []byte) {
	c.SysConfig.Lock()
	defer c.SysConfig.Unlock()

	// delete old system rules, that may be different from the new ones
	c.preloadCallback()

	if err := json.Unmarshal(rawConfig, &c.SysConfig); err != nil {
		// we only log the parser error, giving the user a chance to write a valid config
		log.Error("Error parsing firewall configuration %s: %s", c.file, err)
	}
	log.Info("fw configuration loaded")
}
+
+func (c *Config) saveConfiguration(rawConfig string) error {
+ conf, err := json.Marshal([]byte(rawConfig))
+ if err != nil {
+ log.Error("saving json firewall configuration: %s %s", err, conf)
+ return err
+ }
+
+ c.loadConfiguration([]byte(rawConfig))
+
+ if err = ioutil.WriteFile(c.file, []byte(rawConfig), 0644); err != nil {
+ log.Error("writing firewall configuration to disk: %s", err)
+ return err
+ }
+ return nil
+}
+
// StopConfigWatcher stops the configuration file watcher and shuts down
// the monitor goroutines: it signals monitorConfigWorker to exit, sends
// false on ReloadConfChan so MonitorSystemFw terminates, then closes both
// channels and the fsnotify watcher.
func (c *Config) StopConfigWatcher() {
	c.Lock()
	defer c.Unlock()

	if c.monitorExitChan != nil {
		c.monitorExitChan <- true
		close(c.monitorExitChan)
	}
	if c.ReloadConfChan != nil {
		c.ReloadConfChan <- false // exit
		close(c.ReloadConfChan)
	}

	if c.watcher != nil {
		c.watcher.Remove(c.file)
		c.watcher.Close()
	}
}
+
+func (c *Config) monitorConfigWorker() {
+ for {
+ select {
+ case <-c.monitorExitChan:
+ goto Exit
+ case event := <-c.watcher.Events:
+ if (event.Op&fsnotify.Write == fsnotify.Write) || (event.Op&fsnotify.Remove == fsnotify.Remove) {
+ c.LoadDiskConfiguration(true)
+ }
+ }
+ }
+Exit:
+ log.Debug("stop monitoring firewall config file")
+ c.Lock()
+ c.monitorExitChan = nil
+ c.Unlock()
+}
+
+// MonitorSystemFw waits for configuration reloads.
+func (c *Config) MonitorSystemFw(reloadCallback callback) {
+ for {
+ select {
+ case reload := <-c.ReloadConfChan:
+ if reload {
+ reloadCallback()
+ } else {
+ goto Exit
+ }
+ }
+ }
+Exit:
+ log.Info("iptables, stop monitoring system fw rules")
+ c.Lock()
+ c.ReloadConfChan = nil
+ c.Unlock()
+}
diff --git a/daemon/firewall/iptables/iptables.go b/daemon/firewall/iptables/iptables.go
new file mode 100644
index 0000000..fbef516
--- /dev/null
+++ b/daemon/firewall/iptables/iptables.go
@@ -0,0 +1,138 @@
+package iptables
+
+import (
+ "os/exec"
+ "regexp"
+ "sync"
+
+ "github.com/evilsocket/opensnitch/daemon/firewall/common"
+ "github.com/evilsocket/opensnitch/daemon/firewall/config"
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+// Action is the modifier we apply to a rule.
+type Action string
+
+const (
+ // Name is the name that identifies this firewall
+ Name = "iptables"
+ // SystemRulePrefix prefix added to each system rule
+ SystemRulePrefix = "opensnitch-filter"
+)
+
+// Actions we apply to the firewall.
+const (
+ ADD = Action("-A")
+ INSERT = Action("-I")
+ DELETE = Action("-D")
+ FLUSH = Action("-F")
+ NEWCHAIN = Action("-N")
+ DELCHAIN = Action("-X")
+)
+
+// SystemChains holds the fw rules defined by the user
+type SystemChains struct {
+ sync.RWMutex
+ Rules map[string]config.FwRule
+}
+
+// Iptables struct holds the fields of the iptables fw
+type Iptables struct {
+ sync.Mutex
+ config.Config
+ common.Common
+
+ bin string
+ bin6 string
+
+ regexRulesQuery *regexp.Regexp
+ regexSystemRulesQuery *regexp.Regexp
+
+ chains SystemChains
+}
+
+// Fw initializes a new Iptables object
+func Fw() (*Iptables, error) {
+ if err := IsAvailable(); err != nil {
+ return nil, err
+ }
+
+ reRulesQuery, _ := regexp.Compile(`NFQUEUE.*ctstate NEW,RELATED.*NFQUEUE num.*bypass`)
+ reSystemRulesQuery, _ := regexp.Compile(SystemRulePrefix + ".*")
+
+ ipt := &Iptables{
+ bin: "iptables",
+ bin6: "ip6tables",
+ regexRulesQuery: reRulesQuery,
+ regexSystemRulesQuery: reSystemRulesQuery,
+ chains: SystemChains{Rules: make(map[string]config.FwRule)},
+ }
+ return ipt, nil
+}
+
// Name returns the firewall identifier ("iptables"), used to select the
// backend from the daemon configuration.
func (ipt *Iptables) Name() string {
	return Name
}
+
// Init inserts the firewall rules and starts monitoring for firewall
// changes. Steps, in order: load the on-disk fw config (needed to know
// which old rules to clean), wipe any leftover rules, insert the
// interception rules and the user-defined system rules, then start the
// periodic checker that re-inserts them if they disappear.
// Idempotent: a second call while running is a no-op.
func (ipt *Iptables) Init(qNum *int) {
	if ipt.IsRunning() {
		return
	}
	ipt.SetQueueNum(qNum)

	// In order to clean up any existing firewall rule before start,
	// we need to load the fw configuration first.
	ipt.NewSystemFwConfig(ipt.preloadConfCallback)
	go ipt.MonitorSystemFw(ipt.AddSystemRules)
	ipt.LoadDiskConfiguration(false)

	// start from a clean state
	ipt.CleanRules(false)
	ipt.InsertRules()

	ipt.AddSystemRules()
	// start monitoring firewall rules to intercept network traffic
	ipt.NewRulesChecker(ipt.AreRulesLoaded, ipt.reloadRulesCallback)

	ipt.Running = true
}
+
+// Stop deletes the firewall rules, allowing network traffic.
+func (ipt *Iptables) Stop() {
+ if ipt.Running == false {
+ return
+ }
+ ipt.StopConfigWatcher()
+ ipt.StopCheckingRules()
+ ipt.CleanRules(log.GetLogLevel() == log.DEBUG)
+
+ ipt.Running = false
+}
+
// IsAvailable checks if iptables is installed in the system by invoking
// "iptables -V"; any spawn failure is returned as-is.
func IsAvailable() error {
	_, err := exec.Command("iptables", "-V").CombinedOutput()
	return err
}
+
// InsertRules adds the fw rules needed to intercept connections: the DNS
// response queue rule first, then the conntrack NEW/RELATED queue rule.
// A conntrack failure is fatal (the daemon cannot work without it); a DNS
// rule failure is only logged.
// NOTE(review): because of the else-if, when the DNS rule fails the
// conntrack rule is never even attempted — confirm that short-circuit is
// intentional.
func (ipt *Iptables) InsertRules() {
	if err4, err6 := ipt.QueueDNSResponses(true, true); err4 != nil || err6 != nil {
		log.Error("Error while running DNS firewall rule: %s %s", err4, err6)
	} else if err4, err6 = ipt.QueueConnections(true, true); err4 != nil || err6 != nil {
		log.Fatal("Error while running conntrack firewall rule: %s %s", err4, err6)
	}
}
+
// CleanRules deletes the rules we added: DNS interception, connection
// interception and the user-defined system rules. logErrors controls
// whether deletion failures are reported (they are expected on a fresh
// start when no rules exist yet).
func (ipt *Iptables) CleanRules(logErrors bool) {
	ipt.QueueDNSResponses(false, logErrors)
	ipt.QueueConnections(false, logErrors)
	ipt.DeleteSystemRules(true, logErrors)
}
diff --git a/daemon/firewall/iptables/monitor.go b/daemon/firewall/iptables/monitor.go
new file mode 100644
index 0000000..316b3b8
--- /dev/null
+++ b/daemon/firewall/iptables/monitor.go
@@ -0,0 +1,62 @@
+package iptables
+
+import (
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+// AreRulesLoaded checks if the firewall rules for intercept traffic are loaded.
+// It lists the mangle/OUTPUT chain (IPv4 and, if enabled, IPv6) and every
+// user-configured system chain, matching the output against the expected
+// rule regexps. Returns false as soon as any expected rule is missing or
+// a listing command fails.
+func (ipt *Iptables) AreRulesLoaded() bool {
+	var outMangle6 string
+
+	outMangle, err := core.Exec("iptables", []string{"-n", "-L", "OUTPUT", "-t", "mangle"})
+	if err != nil {
+		return false
+	}
+
+	if core.IPv6Enabled {
+		outMangle6, err = core.Exec("ip6tables", []string{"-n", "-L", "OUTPUT", "-t", "mangle"})
+		if err != nil {
+			return false
+		}
+	}
+
+	// check every system rule we previously created; a listing error for a
+	// chain is ignored (err4/err6 != nil) rather than treated as "missing".
+	systemRulesLoaded := true
+	ipt.chains.RLock()
+	if len(ipt.chains.Rules) > 0 {
+		for _, rule := range ipt.chains.Rules {
+			if chainOut4, err4 := core.Exec("iptables", []string{"-n", "-L", rule.Chain, "-t", rule.Table}); err4 == nil {
+				if ipt.regexSystemRulesQuery.FindString(chainOut4) == "" {
+					systemRulesLoaded = false
+					break
+				}
+			}
+			if core.IPv6Enabled {
+				if chainOut6, err6 := core.Exec("ip6tables", []string{"-n", "-L", rule.Chain, "-t", rule.Table}); err6 == nil {
+					if ipt.regexSystemRulesQuery.FindString(chainOut6) == "" {
+						systemRulesLoaded = false
+						break
+					}
+				}
+			}
+		}
+	}
+	ipt.chains.RUnlock()
+
+	// interception rule present in IPv4 mangle output AND all system rules
+	result := ipt.regexRulesQuery.FindString(outMangle) != "" &&
+		systemRulesLoaded
+
+	if core.IPv6Enabled {
+		result = result && ipt.regexRulesQuery.FindString(outMangle6) != ""
+	}
+
+	return result
+}
+
+// reloadRulesCallback is invoked by the rules checker when the firewall
+// rules have been modified externally. It deletes our interception rules
+// (ignoring errors, they may be partially gone) and re-adds everything.
+func (ipt *Iptables) reloadRulesCallback() {
+	log.Important("firewall rules changed, reloading")
+	ipt.QueueDNSResponses(false, false)
+	ipt.QueueConnections(false, false)
+	ipt.InsertRules()
+	ipt.AddSystemRules()
+}
diff --git a/daemon/firewall/iptables/rules.go b/daemon/firewall/iptables/rules.go
new file mode 100644
index 0000000..0b32f96
--- /dev/null
+++ b/daemon/firewall/iptables/rules.go
@@ -0,0 +1,77 @@
+package iptables
+
+import (
+ "fmt"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/vishvananda/netlink"
+)
+
+// RunRule inserts or deletes a firewall rule for IPv4 and, when the system
+// has IPv6 enabled, IPv6. action is the iptables action flag; when enable
+// is false the action is forced to "-D" so the same rule spec is deleted.
+// It returns one error per IP family; errors are logged when logError is
+// true.
+func (ipt *Iptables) RunRule(action Action, enable bool, logError bool, rule []string) (err4, err6 error) {
+	if !enable {
+		// disabling always translates to deleting the same rule spec
+		action = "-D"
+	}
+
+	rule = append([]string{string(action)}, rule...)
+
+	// serialize iptables invocations to avoid concurrent table updates
+	ipt.Lock()
+	defer ipt.Unlock()
+
+	if _, err4 = core.Exec(ipt.bin, rule); err4 != nil {
+		if logError {
+			log.Error("Error while running firewall rule, ipv4 err: %s", err4)
+			log.Error("rule: %s", rule)
+		}
+	}
+
+	// On some systems IPv6 is disabled
+	if core.IPv6Enabled {
+		if _, err6 = core.Exec(ipt.bin6, rule); err6 != nil {
+			if logError {
+				log.Error("Error while running firewall rule, ipv6 err: %s", err6)
+				log.Error("rule: %s", rule)
+			}
+		}
+	}
+
+	return
+}
+
+// QueueDNSResponses redirects DNS responses to us, in order to keep a cache
+// of resolved domains.
+// Equivalent rule:
+// INPUT --protocol udp --sport 53 -j NFQUEUE --queue-num 0 --queue-bypass
+func (ipt *Iptables) QueueDNSResponses(enable bool, logError bool) (err4, err6 error) {
+	args := []string{
+		"INPUT",
+		"--protocol", "udp",
+		"--sport", "53",
+		"-j", "NFQUEUE",
+		"--queue-num", fmt.Sprintf("%d", ipt.QueueNum),
+		"--queue-bypass",
+	}
+	return ipt.RunRule(INSERT, enable, logError, args)
+}
+
+// QueueConnections inserts the firewall rule which redirects connections to us.
+// They are queued until the user denies/accept them, or reaches a timeout.
+// OUTPUT -t mangle -m conntrack --ctstate NEW,RELATED -j NFQUEUE --queue-num 0 --queue-bypass
+func (ipt *Iptables) QueueConnections(enable bool, logError bool) (error, error) {
+	err4, err6 := ipt.RunRule(INSERT, enable, logError, []string{
+		"OUTPUT",
+		"-t", "mangle",
+		"-m", "conntrack",
+		"--ctstate", "NEW,RELATED",
+		"-j", "NFQUEUE",
+		"--queue-num", fmt.Sprintf("%d", ipt.QueueNum),
+		"--queue-bypass",
+	})
+	if enable {
+		// flush conntrack as soon as netfilter rule is set. This ensures that already-established
+		// connections will go to netfilter queue.
+		// NOTE(review): the flush happens even if the rule insert failed above.
+		if err := netlink.ConntrackTableFlush(netlink.ConntrackTable); err != nil {
+			log.Error("error in ConntrackTableFlush %s", err)
+		}
+	}
+	return err4, err6
+}
diff --git a/daemon/firewall/iptables/system.go b/daemon/firewall/iptables/system.go
new file mode 100644
index 0000000..42da0dd
--- /dev/null
+++ b/daemon/firewall/iptables/system.go
@@ -0,0 +1,89 @@
+package iptables
+
+import (
+ "strings"
+
+ "github.com/evilsocket/opensnitch/daemon/firewall/config"
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+// CreateSystemRule creates the custom firewall chains and adds them to the system.
+// The chain is named SystemRulePrefix + "-" + rule.Chain and is jumped to
+// from the original chain. It is tracked in ipt.chains.Rules keyed by
+// "<table>-<chain>" so it can be deleted later; already-created chains are
+// skipped.
+func (ipt *Iptables) CreateSystemRule(rule *config.FwRule, logErrors bool) {
+	ipt.chains.Lock()
+	defer ipt.chains.Unlock()
+	if rule == nil {
+		return
+	}
+
+	chainName := SystemRulePrefix + "-" + rule.Chain
+	if _, ok := ipt.chains.Rules[rule.Table+"-"+chainName]; ok {
+		return
+	}
+	ipt.RunRule(NEWCHAIN, true, logErrors, []string{chainName, "-t", rule.Table})
+
+	// Insert the rule at the top of the chain
+	if err4, err6 := ipt.RunRule(INSERT, true, logErrors, []string{rule.Chain, "-t", rule.Table, "-j", chainName}); err4 == nil && err6 == nil {
+		// only track the chain when the jump rule was actually installed
+		ipt.chains.Rules[rule.Table+"-"+chainName] = *rule
+	}
+}
+
+// DeleteSystemRules deletes the system rules.
+// If force is false and the rule has not been previously added,
+// it won't try to delete the rules. Otherwise it'll try to delete them.
+func (ipt *Iptables) DeleteSystemRules(force, logErrors bool) {
+	ipt.chains.Lock()
+	defer ipt.chains.Unlock()
+
+	for _, r := range ipt.SysConfig.SystemRules {
+		if r.Rule == nil {
+			continue
+		}
+		chain := SystemRulePrefix + "-" + r.Rule.Chain
+		if _, ok := ipt.chains.Rules[r.Rule.Table+"-"+chain]; !ok && !force {
+			continue
+		}
+		// flush, unlink from the parent chain, then delete the chain itself
+		ipt.RunRule(FLUSH, true, false, []string{chain, "-t", r.Rule.Table})
+		// enable=false forces the delete ("-D") of the jump rule
+		ipt.RunRule(DELETE, false, logErrors, []string{r.Rule.Chain, "-t", r.Rule.Table, "-j", chain})
+		ipt.RunRule(DELCHAIN, true, false, []string{chain, "-t", r.Rule.Table})
+		delete(ipt.chains.Rules, r.Rule.Table+"-"+chain)
+	}
+}
+
+// AddSystemRule inserts a new rule built from the user configuration into
+// the custom chain created by CreateSystemRule. It returns one error per
+// IP family (both nil for a nil rule).
+func (ipt *Iptables) AddSystemRule(rule *config.FwRule, enable bool) (err4, err6 error) {
+	if rule == nil {
+		return nil, nil
+	}
+	// Take the write lock: rule.Table may be mutated below, and writing
+	// while holding only the read lock races with concurrent readers.
+	rule.Lock()
+	defer rule.Unlock()
+
+	chain := SystemRulePrefix + "-" + rule.Chain
+	if rule.Table == "" {
+		// default to the filter table when the config omits it
+		rule.Table = "filter"
+	}
+	r := []string{chain, "-t", rule.Table}
+	if rule.Parameters != "" {
+		r = append(r, strings.Split(rule.Parameters, " ")...)
+	}
+	r = append(r, "-j", rule.Target)
+	if rule.TargetParameters != "" {
+		r = append(r, strings.Split(rule.TargetParameters, " ")...)
+	}
+
+	return ipt.RunRule(ADD, enable, true, r)
+}
+
+// AddSystemRules creates the system firewall from configuration.
+// Existing system rules are removed first so re-applying the config does
+// not accumulate duplicates.
+func (ipt *Iptables) AddSystemRules() {
+	ipt.DeleteSystemRules(true, false)
+
+	for _, r := range ipt.SysConfig.SystemRules {
+		// the chain must exist before the rule can be added to it
+		ipt.CreateSystemRule(r.Rule, true)
+		ipt.AddSystemRule(r.Rule, true)
+	}
+}
+
+// preloadConfCallback gets called before the fw configuration is reloaded,
+// removing the system rules created from the old configuration. Deletion
+// errors are only logged at debug level.
+func (ipt *Iptables) preloadConfCallback() {
+	ipt.DeleteSystemRules(true, log.GetLogLevel() == log.DEBUG)
+}
diff --git a/daemon/firewall/nftables/monitor.go b/daemon/firewall/nftables/monitor.go
new file mode 100644
index 0000000..9e6621f
--- /dev/null
+++ b/daemon/firewall/nftables/monitor.go
@@ -0,0 +1,55 @@
+package nftables
+
+import (
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+// AreRulesLoaded checks if the firewall rules for intercept traffic are loaded.
+// It expects exactly 2 rules tagged with fwKey (IPv4 + IPv6) in the mangle
+// output chains, and another 2 in the filter input chains.
+func (n *Nft) AreRulesLoaded() bool {
+	n.Lock()
+	defer n.Unlock()
+
+	nRules := 0
+	for _, table := range n.mangleTables {
+		rules, err := n.conn.GetRule(table, n.outputChains[table])
+		if err != nil {
+			// include the underlying error; previously it was dropped
+			log.Error("nftables mangle rules error: %s, %s: %s", table.Name, n.outputChains[table].Name, err)
+			return false
+		}
+		for _, r := range rules {
+			if string(r.UserData) == fwKey {
+				nRules++
+			}
+		}
+	}
+	if nRules != 2 {
+		log.Warning("nftables mangle rules not loaded: %d", nRules)
+		return false
+	}
+
+	nRules = 0
+	for _, table := range n.filterTables {
+		rules, err := n.conn.GetRule(table, n.inputChains[table])
+		if err != nil {
+			log.Error("nftables filter rules error: %s, %s: %s", table.Name, n.inputChains[table].Name, err)
+			return false
+		}
+		for _, r := range rules {
+			if string(r.UserData) == fwKey {
+				nRules++
+			}
+		}
+	}
+	if nRules != 2 {
+		// typo fixed: "nfables" -> "nftables"
+		log.Warning("nftables filter rules not loaded: %d", nRules)
+		return false
+	}
+
+	return true
+}
+
+// reloadRulesCallback is invoked by the rules checker when the nftables
+// rules have been modified externally; it re-adds the system and
+// interception rules.
+func (n *Nft) reloadRulesCallback() {
+	log.Important("nftables firewall rules changed, reloading")
+	n.AddSystemRules()
+	n.InsertRules()
+}
diff --git a/daemon/firewall/nftables/nftables.go b/daemon/firewall/nftables/nftables.go
new file mode 100644
index 0000000..65975da
--- /dev/null
+++ b/daemon/firewall/nftables/nftables.go
@@ -0,0 +1,141 @@
+package nftables
+
+import (
+ "sync"
+
+ "github.com/evilsocket/opensnitch/daemon/firewall/common"
+ "github.com/evilsocket/opensnitch/daemon/firewall/config"
+ "github.com/evilsocket/opensnitch/daemon/firewall/iptables"
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/google/nftables"
+)
+
+const (
+	// Name is the name that identifies this firewall
+	Name = "nftables"
+
+	mangleTableName = "mangle"
+	filterTableName = "filter"
+	// The following chains will be under our own mangle or filter tables.
+	// There shouldn't be other chains with the same name here.
+	outputChain = "output"
+	inputChain  = "input"
+	// key assigned to every fw rule we add, in order to get rules by this key.
+	fwKey = "opensnitch-key"
+)
+
+// table definitions for both tables in both IP families; registered with
+// the kernel by addGlobalTables.
+var (
+	filterTable = &nftables.Table{
+		Family: nftables.TableFamilyIPv4,
+		Name:   filterTableName,
+	}
+	filterTable6 = &nftables.Table{
+		Family: nftables.TableFamilyIPv6,
+		Name:   filterTableName,
+	}
+	mangleTable = &nftables.Table{
+		Family: nftables.TableFamilyIPv4,
+		Name:   mangleTableName,
+	}
+	mangleTable6 = &nftables.Table{
+		Family: nftables.TableFamilyIPv6,
+		Name:   mangleTableName,
+	}
+)
+
+// Nft holds the fields of our nftables firewall.
+// The embedded mutex guards conn and the table/chain maps.
+type Nft struct {
+	sync.Mutex
+	config.Config
+	common.Common
+
+	// conn talks netlink directly to the kernel's nftables subsystem
+	conn *nftables.Conn
+
+	// tables registered by addGlobalTables (IPv4 + IPv6 each)
+	mangleTables []*nftables.Table
+	filterTables []*nftables.Table
+	// per-table chains created by addGlobalChains
+	outputChains map[*nftables.Table]*nftables.Chain
+	inputChains  map[*nftables.Table]*nftables.Chain
+
+	// bookkeeping of user-configured system chains (shared with iptables)
+	chains iptables.SystemChains
+}
+
+// NewNft creates a new netlink connection to the kernel's nftables
+// subsystem. NOTE(review): despite the name it returns a *nftables.Conn,
+// not an *Nft — see Fw for the latter.
+func NewNft() *nftables.Conn {
+	return &nftables.Conn{}
+}
+
+// Fw initializes a new nftables object with empty chain maps.
+// It never fails; the error return exists to match the Firewall
+// constructor shape used by the iptables backend.
+func Fw() (*Nft, error) {
+	n := &Nft{
+		outputChains: make(map[*nftables.Table]*nftables.Chain),
+		inputChains:  make(map[*nftables.Table]*nftables.Chain),
+		chains:       iptables.SystemChains{Rules: make(map[string]config.FwRule)},
+	}
+	return n, nil
+}
+
+// Name returns the name of this firewall backend ("nftables").
+func (n *Nft) Name() string {
+	return Name
+}
+
+// Init inserts the firewall rules and starts monitoring for firewall
+// changes. qNum is the NFQUEUE number the interception rules redirect to.
+func (n *Nft) Init(qNum *int) {
+	if n.IsRunning() {
+		return
+	}
+	n.SetQueueNum(qNum)
+	n.conn = NewNft()
+
+	// In order to clean up any existing firewall rule before start,
+	// we need to load the fw configuration first.
+	n.NewSystemFwConfig(n.preloadConfCallback)
+	go n.MonitorSystemFw(n.AddSystemRules)
+	n.LoadDiskConfiguration(false)
+
+	// start from a clean state
+	n.CleanRules(false)
+	n.AddSystemRules()
+
+	n.InsertRules()
+	// start monitoring firewall rules to intercept network traffic.
+	n.NewRulesChecker(n.AreRulesLoaded, n.reloadRulesCallback)
+
+	// NOTE(review): Running is written without synchronization — confirm
+	// Init/Stop are only ever called from a single goroutine.
+	n.Running = true
+}
+
+// Stop deletes the firewall rules, allowing network traffic.
+// It stops the config watcher and the rules checker first so nothing
+// re-adds the rules while they are being removed.
+func (n *Nft) Stop() {
+	if !n.IsRunning() {
+		return
+	}
+	n.StopConfigWatcher()
+	n.StopCheckingRules()
+	// only log deletion errors when running at debug level
+	n.CleanRules(log.GetLogLevel() == log.DEBUG)
+
+	n.Running = false
+}
+
+// InsertRules adds fw rules to intercept connections.
+// Old interception rules are deleted first, then tables and chains are
+// (re)registered before the queue rules are added.
+func (n *Nft) InsertRules() {
+	n.delInterceptionRules()
+	n.addGlobalTables()
+	n.addGlobalChains()
+
+	// NOTE(review): because of the else-if, QueueConnections is never
+	// attempted when the DNS rule fails — confirm that is intended.
+	if err, _ := n.QueueDNSResponses(true, true); err != nil {
+		log.Error("Error while Running DNS nftables rule: %s", err)
+	} else if err, _ = n.QueueConnections(true, true); err != nil {
+		// log.Fatal exits the daemon: without the conntrack rule no
+		// connection can be intercepted.
+		log.Fatal("Error while Running conntrack nftables rule: %s", err)
+	}
+}
+
+// CleanRules deletes the rules we added (those tagged with fwKey) and the
+// user-configured system rules. Errors are only logged when logErrors is
+// true.
+func (n *Nft) CleanRules(logErrors bool) {
+	n.delInterceptionRules()
+	// commit any deletions still pending in the netlink connection
+	err := n.conn.Flush()
+	if err != nil && logErrors {
+		log.Error("Error cleaning nftables tables: %s", err)
+	}
+	n.DeleteSystemRules(true, logErrors)
+}
diff --git a/daemon/firewall/nftables/rules.go b/daemon/firewall/nftables/rules.go
new file mode 100644
index 0000000..8f1a7f6
--- /dev/null
+++ b/daemon/firewall/nftables/rules.go
@@ -0,0 +1,201 @@
+package nftables
+
+import (
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/google/nftables"
+ "github.com/google/nftables/binaryutil"
+ "github.com/google/nftables/expr"
+ "github.com/vishvananda/netlink"
+ "golang.org/x/sys/unix"
+)
+
+// addGlobalTables registers our filter and mangle tables (IPv4 and IPv6)
+// with the kernel and commits the change.
+func (n *Nft) addGlobalTables() error {
+	n.filterTables = []*nftables.Table{
+		n.conn.AddTable(filterTable),
+		n.conn.AddTable(filterTable6),
+	}
+	n.mangleTables = []*nftables.Table{
+		n.conn.AddTable(mangleTable),
+		n.conn.AddTable(mangleTable6),
+	}
+
+	// apply changes
+	return n.conn.Flush()
+}
+
+// TODO: add more parameters, make it more generic
+// addChain registers a chain of the given type/hook/priority on table and
+// returns it. The change is not committed until conn.Flush() is called.
+func (n *Nft) addChain(name string, table *nftables.Table, prio *nftables.ChainPriority, ctype nftables.ChainType, hook *nftables.ChainHook) *nftables.Chain {
+	// nft list chains
+	return n.conn.AddChain(&nftables.Chain{
+		Name:     name,
+		Table:    table,
+		Type:     ctype,
+		Hooknum:  hook,
+		Priority: prio,
+		//Policy: nftables.ChainPolicyDrop
+	})
+}
+
+// addGlobalChains creates the output chains on the mangle tables and the
+// input chains on the filter tables, then commits the change. The flush
+// error is logged and returned (callers currently ignore it).
+func (n *Nft) addGlobalChains() error {
+	// nft list tables
+	for _, table := range n.mangleTables {
+		n.outputChains[table] = n.addChain(outputChain, table, nftables.ChainPriorityMangle, nftables.ChainTypeRoute, nftables.ChainHookOutput)
+	}
+	for _, table := range n.filterTables {
+		n.inputChains[table] = n.addChain(inputChain, table, nftables.ChainPriorityFilter, nftables.ChainTypeFilter, nftables.ChainHookInput)
+	}
+	// apply changes
+	if err := n.conn.Flush(); err != nil {
+		// message fixed: this adds chains, not mangle tables
+		log.Warning("Error adding nftables chains: %v", err)
+		return err
+	}
+
+	return nil
+}
+
+// QueueDNSResponses redirects DNS responses to us, in order to keep a cache
+// of resolved domains.
+// nft insert rule ip filter input udp sport 53 queue num 0 bypass
+// NOTE(review): unlike the iptables backend, the enable parameter is not
+// used here — rules are always inserted; removal goes through
+// delInterceptionRules. Confirm this asymmetry is intended.
+func (n *Nft) QueueDNSResponses(enable bool, logError bool) (error, error) {
+	if n.conn == nil {
+		return nil, nil
+	}
+	for _, table := range n.filterTables {
+		// nft list ruleset -a
+		n.conn.InsertRule(&nftables.Rule{
+			Position: 0,
+			Table:    table,
+			Chain:    n.inputChains[table],
+			Exprs: []expr.Any{
+				// match meta l4proto == udp
+				&expr.Meta{Key: expr.MetaKeyL4PROTO, Register: 1},
+				&expr.Cmp{
+					Op:       expr.CmpOpEq,
+					Register: 1,
+					Data:     []byte{unix.IPPROTO_UDP},
+				},
+				// load the source port (first 2 bytes of the transport header)
+				&expr.Payload{
+					DestRegister: 1,
+					Base:         expr.PayloadBaseTransportHeader,
+					Offset:       0,
+					Len:          2,
+				},
+				// ... and compare it against 53 (DNS)
+				&expr.Cmp{
+					Op:       expr.CmpOpEq,
+					Register: 1,
+					Data:     binaryutil.BigEndian.PutUint16(uint16(53)),
+				},
+				// send matching packets to our NFQUEUE; bypass lets traffic
+				// flow if no userspace listener is attached
+				&expr.Queue{
+					Num:  n.QueueNum,
+					Flag: expr.QueueFlagBypass,
+				},
+			},
+			// rule key, to allow get it later by key
+			UserData: []byte(fwKey),
+		})
+	}
+	// apply changes
+	if err := n.conn.Flush(); err != nil {
+		return err, nil
+	}
+
+	return nil, nil
+}
+
+// QueueConnections inserts the firewall rule which redirects connections to us.
+// They are queued until the user denies/accept them, or reaches a timeout.
+// nft insert rule ip mangle OUTPUT ct state new queue num 0 bypass
+func (n *Nft) QueueConnections(enable bool, logError bool) (error, error) {
+	if n.conn == nil {
+		return nil, nil
+	}
+	if enable {
+		// flush conntrack as soon as netfilter rule is set. This ensures that already-established
+		// connections will go to netfilter queue.
+		if err := netlink.ConntrackTableFlush(netlink.ConntrackTable); err != nil {
+			log.Error("nftables, error in ConntrackTableFlush %s", err)
+		}
+	}
+
+	for _, table := range n.mangleTables {
+		n.conn.InsertRule(&nftables.Rule{
+			Position: 0,
+			Table:    table,
+			Chain:    n.outputChains[table],
+			Exprs: []expr.Any{
+				// load the conntrack state into register 1
+				&expr.Ct{Register: 1, SourceRegister: false, Key: expr.CtKeySTATE},
+				// mask it down to the NEW|RELATED bits
+				&expr.Bitwise{
+					SourceRegister: 1,
+					DestRegister:   1,
+					Len:            4,
+					Mask:           binaryutil.NativeEndian.PutUint32(expr.CtStateBitNEW | expr.CtStateBitRELATED),
+					Xor:            binaryutil.NativeEndian.PutUint32(0),
+				},
+				// non-zero result means the state matched NEW or RELATED
+				&expr.Cmp{Op: expr.CmpOpNeq, Register: 1, Data: []byte{0, 0, 0, 0}},
+				// queue to userspace; bypass avoids blocking traffic when
+				// no listener is attached
+				&expr.Queue{
+					Num:  n.QueueNum,
+					Flag: expr.QueueFlagBypass,
+				},
+			},
+			// rule key, to allow get it later by key
+			UserData: []byte(fwKey),
+		})
+	}
+	// apply changes
+	if err := n.conn.Flush(); err != nil {
+		return err, nil
+	}
+
+	return nil, nil
+}
+
+// delInterceptionRules deletes every rule we tagged with fwKey, i.e. the
+// DNS and connection interception rules.
+func (n *Nft) delInterceptionRules() {
+	n.delRulesByKey(fwKey)
+}
+
+// delRulesByKey walks every chain, deletes the rules whose UserData matches
+// key, and removes any chain that ends up empty after the deletion.
+// Failures are logged and the walk continues.
+func (n *Nft) delRulesByKey(key string) {
+	chains, err := n.conn.ListChains()
+	if err != nil {
+		log.Warning("nftables, error listing chains: %s", err)
+		return
+	}
+	for _, c := range chains {
+		rules, err := n.conn.GetRule(c.Table, c)
+		if err != nil {
+			log.Warning("nftables, error listing rules (%s): %s", c.Table.Name, err)
+			continue
+		}
+
+		commit := false
+		for _, r := range rules {
+			if string(r.UserData) != key {
+				continue
+			}
+			// just passing the rule object doesn't work; delete by handle.
+			if err := n.conn.DelRule(&nftables.Rule{
+				Table:  c.Table,
+				Chain:  c,
+				Handle: r.Handle,
+			}); err != nil {
+				log.Warning("nftables, error adding rule to be deleted (%s/%s): %s", c.Table.Name, c.Name, err)
+				continue
+			}
+			commit = true
+		}
+		if commit {
+			if err := n.conn.Flush(); err != nil {
+				log.Warning("nftables, error deleting interception rules (%s/%s): %s", c.Table.Name, c.Name, err)
+			}
+		}
+		// if we deleted rules and the chain is now empty, drop the chain too
+		if rules, err := n.conn.GetRule(c.Table, c); err == nil {
+			if commit && len(rules) == 0 {
+				n.conn.DelChain(c)
+				n.conn.Flush()
+			}
+		}
+	}
+}
diff --git a/daemon/firewall/nftables/system.go b/daemon/firewall/nftables/system.go
new file mode 100644
index 0000000..2d7497c
--- /dev/null
+++ b/daemon/firewall/nftables/system.go
@@ -0,0 +1,40 @@
+package nftables
+
+import (
+ "github.com/evilsocket/opensnitch/daemon/firewall/config"
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+// CreateSystemRule create the custom firewall chains and adds them to system.
+// nft insert rule ip opensnitch-filter opensnitch-input udp dport 1153
+// Not implemented yet for the nftables backend; the method exists to
+// satisfy the firewall.Firewall interface.
+func (n *Nft) CreateSystemRule(rule *config.FwRule, logErrors bool) {
+	// TODO
+}
+
+// DeleteSystemRules deletes the system rules.
+// If force is false and the rule has not been previously added,
+// it won't try to delete the rules. Otherwise it'll try to delete them.
+// Not implemented yet for the nftables backend.
+func (n *Nft) DeleteSystemRules(force, logErrors bool) {
+	// TODO
+}
+
+// AddSystemRule inserts a new rule.
+// Not implemented yet for the nftables backend; always returns nil, nil.
+func (n *Nft) AddSystemRule(rule *config.FwRule, enable bool) (error, error) {
+	// TODO
+	return nil, nil
+}
+
+// AddSystemRules creates the system firewall from configuration.
+// Currently a no-op in effect, since the per-rule methods are TODO stubs.
+func (n *Nft) AddSystemRules() {
+	n.DeleteSystemRules(true, false)
+
+	for _, r := range n.SysConfig.SystemRules {
+		n.CreateSystemRule(r.Rule, true)
+		n.AddSystemRule(r.Rule, true)
+	}
+}
+
+// preloadConfCallback gets called before the fw configuration is reloaded,
+// removing rules created from the old configuration (currently a no-op,
+// DeleteSystemRules is a TODO stub).
+func (n *Nft) preloadConfCallback() {
+	n.DeleteSystemRules(true, log.GetLogLevel() == log.DEBUG)
+}
diff --git a/daemon/firewall/rules.go b/daemon/firewall/rules.go
new file mode 100644
index 0000000..4b83388
--- /dev/null
+++ b/daemon/firewall/rules.go
@@ -0,0 +1,85 @@
+package firewall
+
+import (
+ "github.com/evilsocket/opensnitch/daemon/firewall/config"
+ "github.com/evilsocket/opensnitch/daemon/firewall/iptables"
+ "github.com/evilsocket/opensnitch/daemon/firewall/nftables"
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+// Firewall is the interface that all firewalls (iptables, nftables) must implement.
+type Firewall interface {
+	// lifecycle: Init takes the NFQUEUE number to redirect traffic to
+	Init(*int)
+	Stop()
+	Name() string
+	IsRunning() bool
+	SetQueueNum(num *int)
+
+	// interception rules (DNS responses + new outbound connections);
+	// the two error returns are per IP family (IPv4, IPv6)
+	InsertRules()
+	QueueDNSResponses(bool, bool) (error, error)
+	QueueConnections(bool, bool) (error, error)
+	CleanRules(bool)
+
+	// user-configured system rules
+	AddSystemRules()
+	DeleteSystemRules(bool, bool)
+	AddSystemRule(*config.FwRule, bool) (error, error)
+	CreateSystemRule(*config.FwRule, bool)
+}
+
+var fw Firewall
+
+// IsRunning reports whether a firewall backend has been selected and is
+// currently active.
+func IsRunning() bool {
+	if fw == nil {
+		return false
+	}
+	return fw.IsRunning()
+}
+
+// CleanRules deletes the rules added by the active firewall backend.
+// It is a no-op when no backend has been initialized.
+func CleanRules(logErrors bool) {
+	if fw != nil {
+		fw.CleanRules(logErrors)
+	}
+}
+
+// Stop deletes the firewall rules, allowing network traffic.
+// It is a no-op when no backend has been initialized.
+func Stop() {
+	if fw != nil {
+		fw.Stop()
+	}
+}
+
+// Init initializes the firewall and loads firewall rules.
+// fwType selects the backend ("iptables" or "nftables"); nftables is also
+// used as a fallback when iptables was requested but is unavailable.
+// qNum is the NFQUEUE number passed down to the backend.
+func Init(fwType string, qNum *int) {
+	var err error
+
+	if fwType == iptables.Name {
+		fw, err = iptables.Fw()
+		if err != nil {
+			log.Warning("iptables not available: %s", err)
+		}
+	}
+
+	// if iptables is not installed, we can add nftables rules directly to the kernel,
+	// without relying on any binaries.
+	if fwType == nftables.Name || err != nil {
+		fw, err = nftables.Fw()
+		if err != nil {
+			log.Warning("nftables not available: %s", err)
+		}
+	}
+
+	if err != nil {
+		log.Warning("firewall error: %s, not iptables nor nftables are available or are usable. Please, report it on github.", err)
+		return
+	}
+
+	// NOTE(review): an unknown fwType leaves fw nil and lands here.
+	if fw == nil {
+		log.Error("firewall not initialized.")
+		return
+	}
+	// stop first to ensure a clean restart if Init is called again
+	fw.Stop()
+	fw.Init(qNum)
+
+	log.Info("Using %s firewall", fw.Name())
+}
diff --git a/daemon/go.mod b/daemon/go.mod
new file mode 100644
index 0000000..35d5145
--- /dev/null
+++ b/daemon/go.mod
@@ -0,0 +1,16 @@
+module github.com/evilsocket/opensnitch/daemon
+
+go 1.15
+
+require (
+ github.com/fsnotify/fsnotify v1.4.7
+ github.com/golang/protobuf v1.5.0
+ github.com/google/gopacket v1.1.14
+ github.com/google/nftables v0.1.0
+ github.com/iovisor/gobpf v0.2.0
+ github.com/vishvananda/netlink v0.0.0-20210811191823-e1a867c6b452
+ golang.org/x/net v0.0.0-20191028085509-fe3aa8a45271
+ golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1
+ google.golang.org/grpc v1.32.0
+ google.golang.org/protobuf v1.26.0
+)
diff --git a/daemon/log/log.go b/daemon/log/log.go
new file mode 100644
index 0000000..493f8e5
--- /dev/null
+++ b/daemon/log/log.go
@@ -0,0 +1,212 @@
+package log
+
+import (
+ "fmt"
+ "os"
+ "strings"
+ "sync"
+ "time"
+)
+
+type Handler func(format string, args ...interface{})
+
+// ANSI escape sequences used to colorize terminal output.
+// https://misc.flogisoft.com/bash/tip_colors_and_formatting
+const (
+	BOLD = "\033[1m"
+	DIM  = "\033[2m"
+
+	RED    = "\033[31m"
+	GREEN  = "\033[32m"
+	BLUE   = "\033[34m"
+	YELLOW = "\033[33m"
+
+	FG_BLACK = "\033[30m"
+	FG_WHITE = "\033[97m"
+
+	BG_DGRAY  = "\033[100m"
+	BG_RED    = "\033[41m"
+	BG_GREEN  = "\033[42m"
+	BG_YELLOW = "\033[43m"
+	BG_LBLUE  = "\033[104m"
+
+	RESET = "\033[0m"
+)
+
+// log level constants, in increasing order of severity
+const (
+	DEBUG = iota
+	INFO
+	IMPORTANT
+	WARNING
+	ERROR
+	FATAL
+)
+
+// Package-level logger configuration and state.
+// NOTE(review): the exported vars (WithColors, Output, MinLevel, ...) are
+// read/written in places without holding mutex — confirm they are only
+// changed during startup.
+var (
+	WithColors = true
+	Output     = os.Stdout
+	StdoutFile = "/dev/stdout"
+	DateFormat = "2006-01-02 15:04:05"
+	MinLevel   = INFO
+
+	// mutex guards Output and MinLevel for the accessors below
+	mutex  = &sync.RWMutex{}
+	labels = map[int]string{
+		DEBUG:     "DBG",
+		INFO:      "INF",
+		IMPORTANT: "IMP",
+		WARNING:   "WAR",
+		ERROR:     "ERR",
+		FATAL:     "!!!",
+	}
+	colors = map[int]string{
+		DEBUG:     DIM + FG_BLACK + BG_DGRAY,
+		INFO:      FG_WHITE + BG_GREEN,
+		IMPORTANT: FG_WHITE + BG_LBLUE,
+		WARNING:   FG_WHITE + BG_YELLOW,
+		ERROR:     FG_WHITE + BG_RED,
+		FATAL:     FG_WHITE + BG_RED + BOLD,
+	}
+)
+
+// Wrap wraps a text with the given terminal effect (and a trailing RESET)
+// when colored output is enabled; otherwise the text is returned untouched.
+func Wrap(s, effect string) string {
+	// NOTE(review): WithColors is read without holding mutex — racy if it
+	// is toggled at runtime; confirm it is only set during startup.
+	if WithColors {
+		return effect + s + RESET
+	}
+	return s
+}
+
+// Dim dims a text.
+func Dim(s string) string {
+	return Wrap(s, DIM)
+}
+
+// Bold bolds a text.
+func Bold(s string) string {
+	return Wrap(s, BOLD)
+}
+
+// Red colors the text red.
+func Red(s string) string {
+	return Wrap(s, RED)
+}
+
+// Green colors the text green.
+func Green(s string) string {
+	return Wrap(s, GREEN)
+}
+
+// Blue colors the text blue.
+func Blue(s string) string {
+	return Wrap(s, BLUE)
+}
+
+// Yellow colors the text yellow.
+func Yellow(s string) string {
+	return Wrap(s, YELLOW)
+}
+
+// Raw prints out a text without colors, level label or timestamp,
+// serialized against the other logging functions.
+func Raw(format string, args ...interface{}) {
+	mutex.Lock()
+	defer mutex.Unlock()
+	fmt.Fprintf(Output, format, args...)
+}
+
+// SetLogLevel sets the minimum level a message must have to be printed
+// (one of the DEBUG..FATAL constants).
+func SetLogLevel(newLevel int) {
+	mutex.Lock()
+	defer mutex.Unlock()
+	MinLevel = newLevel
+}
+
+// GetLogLevel returns the current log level configured.
+func GetLogLevel() int {
+	// read-only access: the shared (read) lock of the RWMutex is enough
+	// and allows concurrent readers.
+	mutex.RLock()
+	defer mutex.RUnlock()
+
+	return MinLevel
+}
+
+// Log prints out a text with the given color and format when level is at
+// or above the configured minimum. The line is composed of a timestamp,
+// the colored level label and the formatted message (newline appended if
+// missing). The mutex is held for the whole format+write.
+func Log(level int, format string, args ...interface{}) {
+	mutex.Lock()
+	defer mutex.Unlock()
+	if level >= MinLevel {
+		label := labels[level]
+		color := colors[level]
+		when := time.Now().UTC().Format(DateFormat)
+
+		what := fmt.Sprintf(format, args...)
+		if strings.HasSuffix(what, "\n") == false {
+			what += "\n"
+		}
+
+		// "[timestamp]  LBL  message"
+		l := Dim("[%s]")
+		r := Wrap(" %s ", color) + " %s"
+
+		fmt.Fprintf(Output, l+" "+r, when, label, what)
+	}
+}
+
+// setDefaultLogOutput points the logger back at stdout, under the mutex.
+func setDefaultLogOutput() {
+	mutex.Lock()
+	Output = os.Stdout
+	mutex.Unlock()
+}
+
+// OpenFile opens a file to print out the logs
+func OpenFile(logFile string) (err error) {
+ if logFile == StdoutFile {
+ setDefaultLogOutput()
+ return
+ }
+
+ if Output, err = os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644); err != nil {
+ Error("Error opening log: %s %s", logFile, err)
+ //fallback to stdout
+ setDefaultLogOutput()
+ }
+ Important("Start writing logs to %s", logFile)
+
+ return err
+}
+
+// Close closes the current output file descriptor, unless it is stdout.
+// NOTE(review): Output is read without the mutex here.
+func Close() {
+	if Output != os.Stdout {
+		Output.Close()
+	}
+}
+
+// Debug is the log level for debugging purposes.
+func Debug(format string, args ...interface{}) {
+	Log(DEBUG, format, args...)
+}
+
+// Info is the log level for informative messages.
+func Info(format string, args ...interface{}) {
+	Log(INFO, format, args...)
+}
+
+// Important is the log level for things that must pay attention.
+func Important(format string, args ...interface{}) {
+	Log(IMPORTANT, format, args...)
+}
+
+// Warning is the log level for non-critical errors.
+func Warning(format string, args ...interface{}) {
+	Log(WARNING, format, args...)
+}
+
+// Error is the log level for errors that should be corrected.
+func Error(format string, args ...interface{}) {
+	Log(ERROR, format, args...)
+}
+
+// Fatal is the log level for errors that must be corrected before continue.
+// It terminates the process with exit code 1; deferred functions do NOT run.
+func Fatal(format string, args ...interface{}) {
+	Log(FATAL, format, args...)
+	os.Exit(1)
+}
diff --git a/daemon/main.go b/daemon/main.go
new file mode 100644
index 0000000..254e65d
--- /dev/null
+++ b/daemon/main.go
@@ -0,0 +1,415 @@
+package main
+
+import (
+ "bytes"
+ "context"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ golog "log"
+ "os"
+ "os/signal"
+ "runtime"
+ "runtime/pprof"
+ "syscall"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/conman"
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/dns"
+ "github.com/evilsocket/opensnitch/daemon/firewall"
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/evilsocket/opensnitch/daemon/netfilter"
+ "github.com/evilsocket/opensnitch/daemon/netlink"
+ "github.com/evilsocket/opensnitch/daemon/procmon/monitor"
+ "github.com/evilsocket/opensnitch/daemon/rule"
+ "github.com/evilsocket/opensnitch/daemon/statistics"
+ "github.com/evilsocket/opensnitch/daemon/ui"
+)
+
var (
	// options configurable from the command line (registered in init()).
	showVersion    = false
	procmonMethod  = ""
	logFile        = ""
	rulesPath      = "rules"
	noLiveReload   = false
	queueNum       = 0
	repeatQueueNum int //will be set later to queueNum + 1
	workers        = 16
	debug          = false
	warning        = false
	important      = false
	errorlog       = false

	// connection to the UI gRPC service.
	uiSocket = ""
	uiClient = (*ui.Client)(nil)

	// profiling output paths; empty disables profiling.
	cpuProfile = ""
	memProfile = ""

	// global daemon state shared by main(), the signal handler and the workers.
	ctx           = (context.Context)(nil)
	cancel        = (context.CancelFunc)(nil)
	err           = (error)(nil)
	rules         = (*rule.Loader)(nil)
	stats         = (*statistics.Statistics)(nil)
	queue         = (*netfilter.Queue)(nil)
	repeatPktChan = (<-chan netfilter.Packet)(nil)
	pktChan       = (<-chan netfilter.Packet)(nil)
	wrkChan       = (chan netfilter.Packet)(nil)
	sigChan       = (chan os.Signal)(nil)
	exitChan      = (chan bool)(nil)
)
+
+func init() {
+ flag.BoolVar(&showVersion, "version", debug, "Show daemon version of this executable and exit.")
+
+ flag.StringVar(&procmonMethod, "process-monitor-method", procmonMethod, "How to search for processes path. Options: ftrace, audit (experimental), ebpf (experimental), proc (default)")
+ flag.StringVar(&uiSocket, "ui-socket", uiSocket, "Path the UI gRPC service listener (https://github.com/grpc/grpc/blob/master/doc/naming.md).")
+ flag.StringVar(&rulesPath, "rules-path", rulesPath, "Path to load JSON rules from.")
+ flag.IntVar(&queueNum, "queue-num", queueNum, "Netfilter queue number.")
+ flag.IntVar(&workers, "workers", workers, "Number of concurrent workers.")
+ flag.BoolVar(&noLiveReload, "no-live-reload", debug, "Disable rules live reloading.")
+
+ flag.StringVar(&logFile, "log-file", logFile, "Write logs to this file instead of the standard output.")
+ flag.BoolVar(&debug, "debug", debug, "Enable debug level logs.")
+ flag.BoolVar(&warning, "warning", warning, "Enable warning level logs.")
+ flag.BoolVar(&important, "important", important, "Enable important level logs.")
+ flag.BoolVar(&errorlog, "error", errorlog, "Enable error level logs.")
+
+ flag.StringVar(&cpuProfile, "cpu-profile", cpuProfile, "Write CPU profile to this file.")
+ flag.StringVar(&memProfile, "mem-profile", memProfile, "Write memory profile to this file.")
+}
+
+func overwriteLogging() bool {
+ return debug || warning || important || errorlog || logFile != ""
+}
+
+func setupLogging() {
+ golog.SetOutput(ioutil.Discard)
+ if debug {
+ log.SetLogLevel(log.DEBUG)
+ } else if warning {
+ log.SetLogLevel(log.WARNING)
+ } else if important {
+ log.SetLogLevel(log.IMPORTANT)
+ } else if errorlog {
+ log.SetLogLevel(log.ERROR)
+ } else {
+ log.SetLogLevel(log.INFO)
+ }
+
+ var logFileToUse string
+ if logFile == "" {
+ logFileToUse = log.StdoutFile
+ } else {
+ logFileToUse = logFile
+ }
+ log.Close()
+ if err := log.OpenFile(logFileToUse); err != nil {
+ log.Error("Error opening user defined log: %s %s", logFileToUse, err)
+ }
+}
+
// setupSignals installs the handlers for termination signals.
// When one of the registered signals arrives, the main context is
// cancelled, which makes main() and the workers shut down cleanly.
func setupSignals() {
	sigChan = make(chan os.Signal, 1)
	// one slot per worker plus one for main, so senders never block
	exitChan = make(chan bool, workers+1)
	signal.Notify(sigChan,
		syscall.SIGHUP,
		syscall.SIGINT,
		syscall.SIGTERM,
		syscall.SIGQUIT)
	go func() {
		sig := <-sigChan
		log.Raw("\n")
		log.Important("Got signal: %v", sig)
		cancel()
	}()
}
+
+func worker(id int) {
+ log.Debug("Worker #%d started.", id)
+ for true {
+ select {
+ case <-ctx.Done():
+ goto Exit
+ default:
+ pkt, ok := <-wrkChan
+ if !ok {
+ log.Debug("worker channel closed %d", id)
+ goto Exit
+ }
+ onPacket(pkt)
+ }
+ }
+Exit:
+ log.Debug("worker #%d exit", id)
+}
+
// setupWorkers creates the shared packet channel and spawns the pool of
// goroutines that process the intercepted connections.
func setupWorkers() {
	log.Debug("Starting %d workers ...", workers)
	// setup the workers
	wrkChan = make(chan netfilter.Packet)
	for i := 0; i < workers; i++ {
		go worker(i)
	}
}
+
// doCleanup releases every resource acquired during startup: firewall
// rules, the process monitor, the UI connection and both netfilter
// queues. It also flushes the CPU/memory profiles when profiling was
// requested on the command line.
func doCleanup(queue, repeatQueue *netfilter.Queue) {
	log.Info("Cleaning up ...")
	firewall.Stop()
	monitor.End()
	uiClient.Close()
	queue.Close()
	repeatQueue.Close()

	if cpuProfile != "" {
		pprof.StopCPUProfile()
	}

	if memProfile != "" {
		f, err := os.Create(memProfile)
		if err != nil {
			fmt.Printf("Could not create memory profile: %s\n", err)
			return
		}
		defer f.Close()
		runtime.GC() // get up-to-date statistics
		if err := pprof.WriteHeapProfile(f); err != nil {
			fmt.Printf("Could not write memory profile: %s\n", err)
		}
	}
}
+
+func onPacket(packet netfilter.Packet) {
+ // DNS response, just parse, track and accept.
+ if dns.TrackAnswers(packet.Packet) == true {
+ packet.SetVerdictAndMark(netfilter.NF_ACCEPT, packet.Mark)
+ stats.OnDNSResponse()
+ return
+ }
+
+ // Parse the connection state
+ con := conman.Parse(packet, uiClient.InterceptUnknown())
+ if con == nil {
+ applyDefaultAction(&packet)
+ return
+ }
+ // accept our own connections
+ if con.Process.ID == os.Getpid() {
+ packet.SetVerdict(netfilter.NF_ACCEPT)
+ return
+ }
+
+ // search a match in preloaded rules
+ r := acceptOrDeny(&packet, con)
+
+ stats.OnConnectionEvent(con, r, r == nil)
+}
+
+func applyDefaultAction(packet *netfilter.Packet) {
+ if uiClient.DefaultAction() == rule.Allow {
+ packet.SetVerdictAndMark(netfilter.NF_ACCEPT, packet.Mark)
+ } else {
+ packet.SetVerdict(netfilter.NF_DROP)
+ }
+}
+
// acceptOrDeny matches the connection against the loaded rules and emits
// the verdict on the packet. When no rule matches, the UI is asked for a
// decision (unless it's unavailable or busy, in which case the default
// action is applied). Returns the applied rule, or nil when none was.
func acceptOrDeny(packet *netfilter.Packet, con *conman.Connection) *rule.Rule {
	r := rules.FindFirstMatch(con)
	if r == nil {
		// no rule matched
		// Note that as soon as we set a verdict on a packet, the next packet in the netfilter queue
		// will begin to be processed even if this function hasn't yet returned

		// send a request to the UI client if
		// 1) connected and running and 2) we are not already asking
		if uiClient.Connected() == false || uiClient.GetIsAsking() == true {
			applyDefaultAction(packet)
			log.Debug("UI is not running or busy, connected: %v, running: %v", uiClient.Connected(), uiClient.GetIsAsking())
			return nil
		}

		uiClient.SetIsAsking(true)
		defer uiClient.SetIsAsking(false)

		// In order not to block packet processing, we send our packet to a different netfilter queue
		// and then immediately pull it back out of that queue
		packet.SetRequeueVerdict(uint16(repeatQueueNum))

		var o bool
		var pkt netfilter.Packet
		// don't wait for the packet longer than 1 sec
		select {
		case pkt, o = <-repeatPktChan:
			if !o {
				log.Debug("error while receiving packet from repeatPktChan")
				return nil
			}
		case <-time.After(1 * time.Second):
			log.Debug("timed out while receiving packet from repeatPktChan")
			return nil
		}

		//check if the pulled out packet is the same we put in
		if res := bytes.Compare(packet.Packet.Data(), pkt.Packet.Data()); res != 0 {
			log.Error("The packet which was requeued has changed abruptly. This should never happen. Please report this incident to the Opensnitch developers. %v %v ", packet, pkt)
			return nil
		}
		// from here on, operate on the packet pulled back from the repeat
		// queue (only the local pointer is reassigned)
		packet = &pkt

		r = uiClient.Ask(con)
		if r == nil {
			log.Error("Invalid rule received, applying default action")
			applyDefaultAction(packet)
			return nil
		}
		ok := false
		pers := ""
		action := string(r.Action)
		if r.Action == rule.Allow {
			action = log.Green(action)
		} else {
			action = log.Red(action)
		}

		// check if and how the rule needs to be saved
		if r.Duration == rule.Always {
			pers = "Saved"
			// add to the loaded rules and persist on disk
			if err := rules.Add(r, true); err != nil {
				log.Error("Error while saving rule: %s", err)
			} else {
				ok = true
			}
		} else {
			pers = "Added"
			// add to the rules but do not save to disk
			if err := rules.Add(r, false); err != nil {
				log.Error("Error while adding rule: %s", err)
			} else {
				ok = true
			}
		}

		if ok {
			log.Important("%s new rule: %s if %s", pers, action, r.Operator.String())
		}

	}
	if packet == nil {
		log.Debug("Packet nil after processing rules")
		return r
	}

	// NOTE(review): the "â" glyphs in the log strings below look like
	// mojibake of check/cross marks — confirm the source file encoding.
	if r.Enabled == false {
		applyDefaultAction(packet)
		ruleName := log.Green(r.Name)
		log.Info("DISABLED (%s) %s %s -> %s:%d (%s)", uiClient.DefaultAction(), log.Bold(log.Green("â")), log.Bold(con.Process.Path), log.Bold(con.To()), con.DstPort, ruleName)

	} else if r.Action == rule.Allow {
		packet.SetVerdictAndMark(netfilter.NF_ACCEPT, packet.Mark)
		ruleName := log.Green(r.Name)
		if r.Operator.Operand == rule.OpTrue {
			ruleName = log.Dim(r.Name)
		}
		log.Debug("%s %s -> %s:%d (%s)", log.Bold(log.Green("â")), log.Bold(con.Process.Path), log.Bold(con.To()), con.DstPort, ruleName)
	} else {
		if r.Action == rule.Reject {
			netlink.KillSocket(con.Protocol, con.SrcIP, con.SrcPort, con.DstIP, con.DstPort)
		}
		packet.SetVerdict(netfilter.NF_DROP)

		log.Debug("%s %s -> %s:%d (%s)", log.Bold(log.Red("â")), log.Bold(con.Process.Path), log.Bold(con.To()), con.DstPort, log.Red(r.Name))
	}

	return r
}
+
// main wires the daemon together: logging, signal handling, rule loading,
// the two netfilter queues (normal + repeat), the UI client and the
// firewall, then loops dispatching packets to the workers until the
// context is cancelled.
func main() {
	ctx, cancel = context.WithCancel(context.Background())
	defer cancel()
	flag.Parse()

	if showVersion {
		fmt.Println(core.Version)
		os.Exit(0)
	}

	setupLogging()

	if cpuProfile != "" {
		if f, err := os.Create(cpuProfile); err != nil {
			log.Fatal("%s", err)
		} else if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal("%s", err)
		}
	}

	log.Important("Starting %s v%s", core.Name, core.Version)

	// NOTE(review): ":=" creates locals shadowing the package-level
	// rulesPath/err vars; the globals stay unset — confirm intended.
	rulesPath, err := core.ExpandPath(rulesPath)
	if err != nil {
		log.Fatal("%s", err)
	}

	setupSignals()

	log.Info("Loading rules from %s ...", rulesPath)
	if rules, err = rule.NewLoader(!noLiveReload); err != nil {
		log.Fatal("%s", err)
	} else if err = rules.Load(rulesPath); err != nil {
		log.Fatal("%s", err)
	}
	stats = statistics.New(rules)

	// prepare the queue
	setupWorkers()
	// NOTE(review): this "queue" also shadows the package-level var; the
	// global netfilter queue pointer is never assigned.
	queue, err := netfilter.NewQueue(uint16(queueNum))
	if err != nil {
		log.Warning("Is opensnitchd already running?")
		log.Fatal("Error while creating queue #%d: %s", queueNum, err)
	}
	pktChan = queue.Packets()

	// the repeat queue is used by acceptOrDeny() to requeue a packet
	// while the UI is being asked, without blocking the main queue.
	repeatQueueNum = queueNum + 1
	repeatQueue, rqerr := netfilter.NewQueue(uint16(repeatQueueNum))
	if rqerr != nil {
		log.Warning("Is opensnitchd already running?")
		log.Fatal("Error while creating queue #%d: %s", repeatQueueNum, rqerr)
	}
	repeatPktChan = repeatQueue.Packets()

	uiClient = ui.NewClient(uiSocket, stats, rules)
	stats.SetConfig(uiClient.GetStatsConfig())

	// queue is ready, run firewall rules
	firewall.Init(uiClient.GetFirewallType(), &queueNum)

	if overwriteLogging() {
		setupLogging()
	}
	// overwrite monitor method from configuration if the user has passed
	// the option via command line.
	if procmonMethod != "" {
		if err := monitor.ReconfigureMonitorMethod(procmonMethod); err != nil {
			log.Warning("Unable to set process monitor method via parameter: %v", err)
		}
	}

	log.Info("Running on netfilter queue #%d ...", queueNum)
	for {
		select {
		case <-ctx.Done():
			goto Exit
		case pkt, ok := <-pktChan:
			if !ok {
				goto Exit
			}
			wrkChan <- pkt
		}
	}
Exit:
	close(wrkChan)
	doCleanup(queue, repeatQueue)
	os.Exit(0)
}
diff --git a/daemon/netfilter/packet.go b/daemon/netfilter/packet.go
new file mode 100644
index 0000000..c515613
--- /dev/null
+++ b/daemon/netfilter/packet.go
@@ -0,0 +1,57 @@
+package netfilter
+
+import "C"
+
+import (
+ "github.com/google/gopacket"
+)
+
// packet consts
const (
	// IPv4 is the IP version number, as found in the first nibble of the header.
	IPv4 = 4
)

// Verdict holds the action to perform on a packet (NF_DROP, NF_ACCEPT, etc)
type Verdict C.uint

// VerdictContainer bundles the verdict on a packet together with an
// optional mark and an optional replacement payload.
type VerdictContainer struct {
	Verdict Verdict
	Mark    uint32
	Packet  []byte
}

// Packet holds the data of a network packet
type Packet struct {
	Packet          gopacket.Packet       // decoded packet data
	Mark            uint32                // netfilter mark of the packet
	verdictChannel  chan VerdictContainer // delivers the verdict back to the queue callback
	UID             uint32                // owner UID reported by netfilter (0xffffffff when unavailable)
	NetworkProtocol uint8                 // IP version (first 4 bits of the header)
}
+
// SetVerdict emits a verdict on a packet
func (p *Packet) SetVerdict(v Verdict) {
	p.verdictChannel <- VerdictContainer{Verdict: v, Packet: nil, Mark: 0}
}

// SetVerdictAndMark emits a verdict on a packet and marks it in order to not
// analyze it again.
func (p *Packet) SetVerdictAndMark(v Verdict, mark uint32) {
	p.verdictChannel <- VerdictContainer{Verdict: v, Packet: nil, Mark: mark}
}

// SetRequeueVerdict emits a NF_QUEUE verdict that sends the packet to
// another netfilter queue (the target queue id travels in the upper 16
// bits of the verdict value).
func (p *Packet) SetRequeueVerdict(newQueueId uint16) {
	v := uint(NF_QUEUE)
	q := (uint(newQueueId) << 16)
	v = v | q
	p.verdictChannel <- VerdictContainer{Verdict: Verdict(v), Packet: nil, Mark: 0}
}

// SetVerdictWithPacket emits a verdict and replaces the packet payload.
func (p *Packet) SetVerdictWithPacket(v Verdict, packet []byte) {
	p.verdictChannel <- VerdictContainer{Verdict: v, Packet: packet, Mark: 0}
}

// IsIPv4 returns whether the packet is IPv4
func (p *Packet) IsIPv4() bool {
	return p.NetworkProtocol == IPv4
}
diff --git a/daemon/netfilter/queue.c b/daemon/netfilter/queue.c
new file mode 100644
index 0000000..f2b7ef6
--- /dev/null
+++ b/daemon/netfilter/queue.c
@@ -0,0 +1,2 @@
+#include "queue.h"
+
diff --git a/daemon/netfilter/queue.go b/daemon/netfilter/queue.go
new file mode 100644
index 0000000..902d1dd
--- /dev/null
+++ b/daemon/netfilter/queue.go
@@ -0,0 +1,242 @@
+package netfilter
+
+/*
+#cgo pkg-config: libnetfilter_queue
+#cgo CFLAGS: -Wall -I/usr/include
+#cgo LDFLAGS: -L/usr/lib64/ -ldl
+
+#include "queue.h"
+*/
+import "C"
+
+import (
+ "fmt"
+ "os"
+ "sync"
+ "time"
+ "unsafe"
+
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/google/gopacket"
+ "github.com/google/gopacket/layers"
+)
+
// socket families, netfilter verdicts and default queue sizing, matching
// the values used by the kernel interface.
const (
	AF_INET  = 2
	AF_INET6 = 10

	NF_DROP   Verdict = 0
	NF_ACCEPT Verdict = 1
	NF_STOLEN Verdict = 2
	NF_QUEUE  Verdict = 3
	NF_REPEAT Verdict = 4
	NF_STOP   Verdict = 5

	NF_DEFAULT_QUEUE_SIZE  uint32 = 4096
	NF_DEFAULT_PACKET_SIZE uint32 = 4096
)

var (
	// queueIndex maps a queue's unique idx to its packets channel, so the
	// C callback can route packets to the right Queue instance.
	queueIndex     = make(map[uint32]*chan Packet, 0)
	queueIndexLock = sync.RWMutex{}
	// exitChan receives a signal when a queue's read loop terminates.
	// NOTE(review): buffered with size 1 but shared by every queue created
	// in this process — confirm a second terminating queue can't block.
	exitChan = make(chan bool, 1)

	gopacketDecodeOptions = gopacket.DecodeOptions{Lazy: true, NoCopy: true}
)
+
+// VerdictContainerC is the struct that contains the mark, action, length and
+// payload of a packet.
+// It's defined in queue.h, and filled on go_callback()
+type VerdictContainerC C.verdictContainer
+
+// Queue holds the information of a netfilter queue.
+// The handles of the connection to the kernel and the created queue.
+// A channel where the intercepted packets will be received.
+// The ID of the queue.
+type Queue struct {
+ h *C.struct_nfq_handle
+ qh *C.struct_nfq_q_handle
+ fd C.int
+ packets chan Packet
+ idx uint32
+}
+
// NewQueue opens a new netfilter queue to receive packets marked with a mark.
// It registers the queue in the global index, keyed by a timestamp-based
// idx, and starts the blocking read loop in its own goroutine.
func NewQueue(queueID uint16) (q *Queue, err error) {
	q = &Queue{
		idx:     uint32(time.Now().UnixNano()),
		packets: make(chan Packet),
	}

	if err = q.create(queueID); err != nil {
		return nil, err
	} else if err = q.setup(); err != nil {
		return nil, err
	}

	go q.run(exitChan)

	return q, nil
}
+
// create opens the netfilter handle, rebinds the AF_INET/AF_INET6
// protocol families and binds the queue number, registering the packets
// channel in the global index on success.
// NOTE(review): destroy() is only called when CreateQueue fails; earlier
// bind failures leave the nfq handle open — confirm intended.
func (q *Queue) create(queueID uint16) (err error) {
	var ret C.int

	if q.h, err = C.nfq_open(); err != nil {
		return fmt.Errorf("Error opening Queue handle: %v", err)
	} else if ret, err = C.nfq_unbind_pf(q.h, AF_INET); err != nil || ret < 0 {
		return fmt.Errorf("Error unbinding existing q handler from AF_INET protocol family: %v", err)
	} else if ret, err = C.nfq_unbind_pf(q.h, AF_INET6); err != nil || ret < 0 {
		return fmt.Errorf("Error unbinding existing q handler from AF_INET6 protocol family: %v", err)
	} else if ret, err := C.nfq_bind_pf(q.h, AF_INET); err != nil || ret < 0 {
		return fmt.Errorf("Error binding to AF_INET protocol family: %v", err)
	} else if ret, err := C.nfq_bind_pf(q.h, AF_INET6); err != nil || ret < 0 {
		return fmt.Errorf("Error binding to AF_INET6 protocol family: %v", err)
	} else if q.qh, err = C.CreateQueue(q.h, C.u_int16_t(queueID), C.u_int32_t(q.idx)); err != nil || q.qh == nil {
		q.destroy()
		return fmt.Errorf("Error binding to queue: %v", err)
	}

	queueIndexLock.Lock()
	queueIndex[q.idx] = &q.packets
	queueIndexLock.Unlock()

	return nil
}
+
// setup configures the bound queue: maximum queue length, copy mode
// (mode 2 — presumably NFQNL_COPY_PACKET, full payload copy; confirm
// against libnetfilter_queue docs), the read file descriptor and the
// netlink receive buffer size. On any failure the queue is destroyed.
func (q *Queue) setup() (err error) {
	var ret C.int

	queueSize := C.u_int32_t(NF_DEFAULT_QUEUE_SIZE)
	bufferSize := C.uint(NF_DEFAULT_PACKET_SIZE)
	totSize := C.uint(NF_DEFAULT_QUEUE_SIZE * NF_DEFAULT_PACKET_SIZE)

	if ret, err = C.nfq_set_queue_maxlen(q.qh, queueSize); err != nil || ret < 0 {
		q.destroy()
		return fmt.Errorf("Unable to set max packets in queue: %v", err)
	} else if C.nfq_set_mode(q.qh, C.u_int8_t(2), bufferSize) < 0 {
		q.destroy()
		return fmt.Errorf("Unable to set packets copy mode: %v", err)
	} else if q.fd, err = C.nfq_fd(q.h); err != nil {
		q.destroy()
		return fmt.Errorf("Unable to get queue file-descriptor. %v", err)
	} else if C.nfnl_rcvbufsiz(C.nfq_nfnlh(q.h), totSize) < 0 {
		q.destroy()
		return fmt.Errorf("Unable to increase netfilter buffer space size")
	}

	return nil
}
+
+func (q *Queue) run(exitCh chan<- bool) {
+ if errno := C.Run(q.h, q.fd); errno != 0 {
+ fmt.Fprintf(os.Stderr, "Terminating, unable to receive packet due to errno=%d", errno)
+ }
+ exitChan <- true
+}
+
// Close ensures that nfqueue resources are freed and closed.
// C.stop_reading_packets() stops the reading packets loop, which causes
// go-subroutine run() to exit.
// After exit, listening queue is destroyed and closed.
// If for some reason any of the steps stucks while closing it, we'll exit by timeout.
// NOTE(review): q.packets is closed before the C read loop stops; a packet
// delivered in that window would be sent on a closed channel — confirm
// the ordering is safe.
func (q *Queue) Close() {
	close(q.packets)
	C.stop_reading_packets()
	q.destroy()
	queueIndexLock.Lock()
	delete(queueIndex, q.idx)
	queueIndexLock.Unlock()
}
+
+func (q *Queue) destroy() {
+ // we'll try to exit cleanly, but sometimes nfqueue gets stuck
+ time.AfterFunc(5*time.Second, func() {
+ log.Warning("queue stuck, closing by timeout")
+ if q != nil {
+ C.close(q.fd)
+ q.closeNfq()
+ }
+ os.Exit(0)
+ })
+ C.nfq_unbind_pf(q.h, AF_INET)
+ C.nfq_unbind_pf(q.h, AF_INET6)
+ if q.qh != nil {
+ if ret := C.nfq_destroy_queue(q.qh); ret != 0 {
+ log.Warning("Queue.destroy(), nfq_destroy_queue() not closed: %d", ret)
+ }
+ }
+
+ q.closeNfq()
+}
+
// closeNfq closes the netfilter handle, if it was opened, logging a
// warning when the kernel reports a non-zero result.
func (q *Queue) closeNfq() {
	if q.h != nil {
		if ret := C.nfq_close(q.h); ret != 0 {
			log.Warning("Queue.destroy(), nfq_close() not closed: %d", ret)
		}
	}
}
+
// Packets returns the channel on which the enqueued packets are delivered.
func (q *Queue) Packets() <-chan Packet {
	return q.packets
}
+
// FYI: the export keyword is mandatory to specify that go_callback is defined elsewhere

// go_callback is invoked from C (queue.h nf_callback) for every queued
// packet. It decodes the payload, routes it to the owning Queue's channel
// via the global index, waits for the verdict and writes it back into the
// C-side container. Defaults to NF_ACCEPT if anything goes wrong.
//
//export go_callback
func go_callback(queueID C.int, data *C.uchar, length C.int, mark C.uint, idx uint32, vc *VerdictContainerC, uid uint32) {
	(*vc).verdict = C.uint(NF_ACCEPT)
	(*vc).data = nil
	(*vc).mark_set = 0
	(*vc).length = 0

	queueIndexLock.RLock()
	queueChannel, found := queueIndex[idx]
	queueIndexLock.RUnlock()
	if !found {
		fmt.Fprintf(os.Stderr, "Unexpected queue idx %d\n", idx)
		return
	}

	xdata := C.GoBytes(unsafe.Pointer(data), length)

	p := Packet{
		verdictChannel:  make(chan VerdictContainer),
		Mark:            uint32(mark),
		UID:             uid,
		NetworkProtocol: xdata[0] >> 4, // first 4 bits is the version
	}

	var packet gopacket.Packet
	if p.IsIPv4() {
		packet = gopacket.NewPacket(xdata, layers.LayerTypeIPv4, gopacketDecodeOptions)
	} else {
		packet = gopacket.NewPacket(xdata, layers.LayerTypeIPv6, gopacketDecodeOptions)
	}

	p.Packet = packet

	select {
	case *queueChannel <- p:
		// the packet was handed to a worker; block until it decides
		select {
		case v := <-p.verdictChannel:
			if v.Packet == nil {
				(*vc).verdict = C.uint(v.Verdict)
			} else {
				(*vc).verdict = C.uint(v.Verdict)
				(*vc).data = (*C.uchar)(unsafe.Pointer(&v.Packet[0]))
				(*vc).length = C.uint(len(v.Packet))
			}

			if v.Mark != 0 {
				(*vc).mark_set = C.uint(1)
				(*vc).mark = C.uint(v.Mark)
			}
		}

	case <-time.After(1 * time.Millisecond):
		// nobody picked the packet up in time; the default NF_ACCEPT stands
		fmt.Fprintf(os.Stderr, "Timed out while sending packet to queue channel %d\n", idx)
	}
}
diff --git a/daemon/netfilter/queue.h b/daemon/netfilter/queue.h
new file mode 100644
index 0000000..64c3ea7
--- /dev/null
+++ b/daemon/netfilter/queue.h
@@ -0,0 +1,113 @@
+#ifndef _NETFILTER_QUEUE_H
+#define _NETFILTER_QUEUE_H
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
/* verdictContainer carries the verdict decided by the Go side back to the
 * C callback: the action, an optional mark and an optional replacement
 * payload. Filled in by go_callback(). */
typedef struct {
    uint verdict;
    uint mark;
    uint mark_set;
    uint length;
    unsigned char *data;
} verdictContainer;

/* resolved address of nfq_get_uid() when the runtime libnetfilter_queue
 * provides it; NULL otherwise (see configure_uid_if_available()). */
static void *get_uid = NULL;

/* go_callback is implemented on the Go side (netfilter/queue.go). */
extern void go_callback(int id, unsigned char* data, int len, uint mark, u_int32_t idx, verdictContainer *vc, uint32_t uid);

/* set to 1 by stop_reading_packets() to make the recv loop exit. */
static uint8_t stop = 0;
+
+static inline void configure_uid_if_available(struct nfq_q_handle *qh){
+ void *hndl = dlopen("libnetfilter_queue.so.1", RTLD_LAZY);
+ if (!hndl) {
+ hndl = dlopen("libnetfilter_queue.so", RTLD_LAZY);
+ if (!hndl){
+ printf("WARNING: libnetfilter_queue not available\n");
+ return;
+ }
+ }
+ if ((get_uid = dlsym(hndl, "nfq_get_uid")) == NULL){
+ printf("WARNING: nfq_get_uid not available\n");
+ return;
+ }
+ printf("OK: libnetfiler_queue supports nfq_get_uid\n");
+#ifdef NFQA_CFG_F_UID_GID
+ if (qh != NULL && nfq_set_queue_flags(qh, NFQA_CFG_F_UID_GID, NFQA_CFG_F_UID_GID)){
+ printf("WARNING: UID not available on this kernel/libnetfilter_queue\n");
+ }
+#endif
+}
+
+static int nf_callback(struct nfq_q_handle *qh, struct nfgenmsg *nfmsg, struct nfq_data *nfa, void *arg){
+ if (stop) {
+ return -1;
+ }
+
+ uint32_t id = -1, idx = 0, mark = 0;
+ struct nfqnl_msg_packet_hdr *ph = NULL;
+ unsigned char *buffer = NULL;
+ int size = 0;
+ verdictContainer vc = {0};
+ uint32_t uid = 0xffffffff;
+
+ mark = nfq_get_nfmark(nfa);
+ ph = nfq_get_msg_packet_hdr(nfa);
+ id = ntohl(ph->packet_id);
+ size = nfq_get_payload(nfa, &buffer);
+ idx = (uint32_t)((uintptr_t)arg);
+
+#ifdef NFQA_CFG_F_UID_GID
+ if (get_uid)
+ nfq_get_uid(nfa, &uid);
+#endif
+
+ go_callback(id, buffer, size, mark, idx, &vc, uid);
+
+ if( vc.mark_set == 1 ) {
+ return nfq_set_verdict2(qh, id, vc.verdict, vc.mark, vc.length, vc.data);
+ }
+ return nfq_set_verdict2(qh, id, vc.verdict, vc.mark, vc.length, vc.data);
+}
+
/* CreateQueue binds queue number `queue` on handle h. idx is smuggled
 * through the callback argument so the Go side can route packets to the
 * right channel. Returns NULL if the queue could not be created. */
static inline struct nfq_q_handle* CreateQueue(struct nfq_handle *h, u_int16_t queue, u_int32_t idx) {
    struct nfq_q_handle* qh = nfq_create_queue(h, queue, &nf_callback, (void*)((uintptr_t)idx));
    if (qh == NULL){
        printf("ERROR: nfq_create_queue() queue not created\n");
    } else {
        configure_uid_if_available(qh);
    }
    return qh;
}
+
/* stop_reading_packets signals Run() and nf_callback() to stop
 * processing packets. */
static inline void stop_reading_packets() {
    stop = 1;
}
+
/* Run reads from the netlink socket fd in a loop and dispatches packets
 * to nf_callback() via nfq_handle_packet(), until recv() fails or
 * stop_reading_packets() was called. Returns the last errno value. */
static inline int Run(struct nfq_handle *h, int fd) {
    char buf[4096] __attribute__ ((aligned));
    int rcvd, opt = 1;

    /* don't abort the loop with ENOBUFS when the kernel queue overflows */
    setsockopt(fd, SOL_NETLINK, NETLINK_NO_ENOBUFS, &opt, sizeof(int));

    while ((rcvd = recv(fd, buf, sizeof(buf), 0)) >= 0) {
        if (stop == 1) {
            return errno;
        }
        nfq_handle_packet(h, buf, rcvd);
    }

    return errno;
}
+
+#endif
diff --git a/daemon/netlink/socket.go b/daemon/netlink/socket.go
new file mode 100644
index 0000000..d9fd001
--- /dev/null
+++ b/daemon/netlink/socket.go
@@ -0,0 +1,153 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "strconv"
+ "syscall"
+
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+// GetSocketInfo asks the kernel via netlink for a given connection.
+// If the connection is found, we return the uid and the possible
+// associated inodes.
+// If the outgoing connection is not found but there're entries with the source
+// port and same protocol, add all the inodes to the list.
+//
+// Some examples:
+// outgoing connection as seen by netfilter || connection details dumped from kernel
+//
+// 47344:192.168.1.106 -> 151.101.65.140:443 || in kernel: 47344:192.168.1.106 -> 151.101.65.140:443
+// 8612:192.168.1.5 -> 192.168.1.255:8612 || in kernel: 8612:192.168.1.105 -> 0.0.0.0:0
+// 123:192.168.1.5 -> 217.144.138.234:123 || in kernel: 123:0.0.0.0 -> 0.0.0.0:0
+// 45015:127.0.0.1 -> 239.255.255.250:1900 || in kernel: 45015:127.0.0.1 -> 0.0.0.0:0
+// 50416:fe80::9fc2:ddcf:df22:aa50 -> fe80::1:53 || in kernel: 50416:254.128.0.0 -> 254.128.0.0:53
+// 51413:192.168.1.106 -> 103.224.182.250:1337 || in kernel: 51413:0.0.0.0 -> 0.0.0.0:0
func GetSocketInfo(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPort uint) (uid int, inodes []int) {
	// -1 means the UID could not be resolved
	uid = -1
	family := uint8(syscall.AF_INET)
	ipproto := uint8(syscall.IPPROTO_TCP)
	protoLen := len(proto)
	// protocols ending in "6" (tcp6, udp6, ...) are IPv6
	if proto[protoLen-1:protoLen] == "6" {
		family = syscall.AF_INET6
	}

	if proto[:3] == "udp" {
		ipproto = syscall.IPPROTO_UDP
		if protoLen >= 7 && proto[:7] == "udplite" {
			ipproto = syscall.IPPROTO_UDPLITE
		}
	}
	if sockList, err := SocketGet(family, ipproto, uint16(srcPort), uint16(dstPort), srcIP, dstIP); err == nil {
		for n, sock := range sockList {
			// 0xffffffff is the kernel's "no UID" marker
			if sock.UID != 0xffffffff {
				uid = int(sock.UID)
			}
			log.Debug("[%d/%d] outgoing connection uid: %d, %d:%v -> %v:%d || netlink response: %d:%v -> %v:%d inode: %d - loopback: %v multicast: %v unspecified: %v linklocalunicast: %v ifaceLocalMulticast: %v GlobalUni: %v ",
				n, len(sockList),
				int(sock.UID),
				srcPort, srcIP, dstIP, dstPort,
				sock.ID.SourcePort, sock.ID.Source,
				sock.ID.Destination, sock.ID.DestinationPort, sock.INode,
				sock.ID.Destination.IsLoopback(),
				sock.ID.Destination.IsMulticast(),
				sock.ID.Destination.IsUnspecified(),
				sock.ID.Destination.IsLinkLocalUnicast(),
				sock.ID.Destination.IsLinkLocalMulticast(),
				sock.ID.Destination.IsGlobalUnicast(),
			)

			// exact match of source and destination: most reliable, so its
			// inode is prepended to the result list
			if sock.ID.SourcePort == uint16(srcPort) && sock.ID.Source.Equal(srcIP) &&
				(sock.ID.DestinationPort == uint16(dstPort)) &&
				((sock.ID.Destination.IsGlobalUnicast() || sock.ID.Destination.IsLoopback()) && sock.ID.Destination.Equal(dstIP)) {
				inodes = append([]int{int(sock.INode)}, inodes...)
				continue
			}
			log.Debug("GetSocketInfo() invalid: %d:%v -> %v:%d", sock.ID.SourcePort, sock.ID.Source, sock.ID.Destination, sock.ID.DestinationPort)
		}

		// handle special cases (see function description): ntp queries (123), broadcasts, incomming connections.
		if len(inodes) == 0 && len(sockList) > 0 {
			for n, sock := range sockList {
				if sockList[n].ID.Destination.Equal(net.IPv4zero) || sockList[n].ID.Destination.Equal(net.IPv6zero) {
					inodes = append([]int{int(sock.INode)}, inodes...)
					log.Debug("netlink socket not found, adding entry: %d:%v -> %v:%d || %d:%v -> %v:%d inode: %d state: %s",
						srcPort, srcIP, dstIP, dstPort,
						sockList[n].ID.SourcePort, sockList[n].ID.Source,
						sockList[n].ID.Destination, sockList[n].ID.DestinationPort,
						sockList[n].INode, TCPStatesMap[sock.State])
				} else if sock.ID.SourcePort == uint16(srcPort) && sock.ID.Source.Equal(srcIP) &&
					(sock.ID.DestinationPort == uint16(dstPort)) {
					// same source and destination port, but the kernel reports
					// a different destination address
					inodes = append([]int{int(sock.INode)}, inodes...)
					continue
				} else {
					log.Debug("netlink socket not found, EXCLUDING entry: %d:%v -> %v:%d || %d:%v -> %v:%d inode: %d state: %s",
						srcPort, srcIP, dstIP, dstPort,
						sockList[n].ID.SourcePort, sockList[n].ID.Source,
						sockList[n].ID.Destination, sockList[n].ID.DestinationPort,
						sockList[n].INode, TCPStatesMap[sock.State])
				}
			}
		}
	} else {
		log.Debug("netlink socket error: %v - %d:%v -> %v:%d", err, srcPort, srcIP, dstIP, dstPort)
	}

	return uid, inodes
}
+
+// GetSocketInfoByInode dumps the kernel sockets table and searches the given
+// inode on it.
+func GetSocketInfoByInode(inodeStr string) (*Socket, error) {
+ inode, err := strconv.ParseUint(inodeStr, 10, 32)
+ if err != nil {
+ return nil, err
+ }
+
+ type inetStruct struct{ family, proto uint8 }
+ socketTypes := []inetStruct{
+ {syscall.AF_INET, syscall.IPPROTO_TCP},
+ {syscall.AF_INET, syscall.IPPROTO_UDP},
+ {syscall.AF_INET6, syscall.IPPROTO_TCP},
+ {syscall.AF_INET6, syscall.IPPROTO_UDP},
+ }
+
+ for _, socket := range socketTypes {
+ socketList, err := SocketsDump(socket.family, socket.proto)
+ if err != nil {
+ return nil, err
+ }
+ for idx := range socketList {
+ if uint32(inode) == socketList[idx].INode {
+ return socketList[idx], nil
+ }
+ }
+ }
+ return nil, fmt.Errorf("Inode not found")
+}
+
// KillSocket kills a socket given the properties of a connection.
// Failures are logged at debug level and otherwise ignored.
// NOTE(review): the family/protocol parsing duplicates GetSocketInfo();
// consider extracting a shared helper.
func KillSocket(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPort uint) {
	family := uint8(syscall.AF_INET)
	ipproto := uint8(syscall.IPPROTO_TCP)
	protoLen := len(proto)
	// protocols ending in "6" (tcp6, udp6, ...) are IPv6
	if proto[protoLen-1:protoLen] == "6" {
		family = syscall.AF_INET6
	}

	if proto[:3] == "udp" {
		ipproto = syscall.IPPROTO_UDP
		if protoLen >= 7 && proto[:7] == "udplite" {
			ipproto = syscall.IPPROTO_UDPLITE
		}
	}

	if sockList, err := SocketGet(family, ipproto, uint16(srcPort), uint16(dstPort), srcIP, dstIP); err == nil {
		for _, s := range sockList {
			if err := socketKill(family, ipproto, s.ID); err != nil {
				log.Debug("Unable to kill socket: %d, %d, %v", srcPort, dstPort, err)
			}
		}
	}
}
diff --git a/daemon/netlink/socket_linux.go b/daemon/netlink/socket_linux.go
new file mode 100644
index 0000000..944f278
--- /dev/null
+++ b/daemon/netlink/socket_linux.go
@@ -0,0 +1,264 @@
+package netlink
+
+import (
+ "encoding/binary"
+ "errors"
+ "fmt"
+ "net"
+ "syscall"
+
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/vishvananda/netlink/nl"
+)
+
+// This is a modification of https://github.com/vishvananda/netlink socket_linux.go - Apache2.0 license
+// which adds support for query UDP, UDPLITE and IPv6 sockets to SocketGet()
+
+const (
+ SOCK_DESTROY = 21
+ sizeofSocketID = 0x30
+ sizeofSocketRequest = sizeofSocketID + 0x8
+ sizeofSocket = sizeofSocketID + 0x18
+)
+
+var (
+ native = nl.NativeEndian()
+ networkOrder = binary.BigEndian
+ TCP_ALL = uint32(0xfff)
+)
+
+// https://elixir.bootlin.com/linux/latest/source/include/net/tcp_states.h
+const (
+ TCP_INVALID = iota
+ TCP_ESTABLISHED
+ TCP_SYN_SENT
+ TCP_SYN_RECV
+ TCP_FIN_WAIT1
+ TCP_FIN_WAIT2
+ TCP_TIME_WAIT
+ TCP_CLOSE
+ TCP_CLOSE_WAIT
+ TCP_LAST_ACK
+ TCP_LISTEN
+ TCP_CLOSING
+ TCP_NEW_SYN_REC
+ TCP_MAX_STATES
+)
+
+// TCPStatesMap holds the list of TCP states
+var TCPStatesMap = map[uint8]string{
+ TCP_INVALID: "invalid",
+ TCP_ESTABLISHED: "established",
+ TCP_SYN_SENT: "syn_sent",
+ TCP_SYN_RECV: "syn_recv",
+ TCP_FIN_WAIT1: "fin_wait1",
+ TCP_FIN_WAIT2: "fin_wait2",
+ TCP_TIME_WAIT: "time_wait",
+ TCP_CLOSE: "close",
+ TCP_CLOSE_WAIT: "close_wait",
+ TCP_LAST_ACK: "last_ack",
+ TCP_LISTEN: "listen",
+ TCP_CLOSING: "closing",
+}
+
+// SocketID holds the socket information of a request/response to the kernel
+type SocketID struct {
+ SourcePort uint16
+ DestinationPort uint16
+ Source net.IP
+ Destination net.IP
+ Interface uint32
+ Cookie [2]uint32
+}
+
+// Socket represents a netlink socket.
+type Socket struct {
+ Family uint8
+ State uint8
+ Timer uint8
+ Retrans uint8
+ ID SocketID
+ Expires uint32
+ RQueue uint32
+ WQueue uint32
+ UID uint32
+ INode uint32
+}
+
+// SocketRequest holds the request/response of a connection to the kernel
+type SocketRequest struct {
+ Family uint8
+ Protocol uint8
+ Ext uint8
+ pad uint8
+ States uint32
+ ID SocketID
+}
+
// writeBuffer is a forward-only cursor over a byte slice, used to
// serialize netlink requests field by field.
type writeBuffer struct {
	Bytes []byte
	pos   int
}

// Write stores a single byte at the current position and advances it.
func (b *writeBuffer) Write(c byte) {
	b.Bytes[b.pos] = c
	b.pos++
}

// Next reserves the next n bytes for the caller to fill in, advancing
// the position past them.
func (b *writeBuffer) Next(n int) []byte {
	start := b.pos
	b.pos = start + n
	return b.Bytes[start:b.pos]
}
+
// Serialize converts a SocketRequest struct to its kernel wire format
// (sizeofSocketRequest bytes). Ports are written in network byte order,
// the rest in native order. IPv4 addresses occupy the first 4 bytes of
// each 16-byte address slot, the remaining 12 are left zeroed.
func (r *SocketRequest) Serialize() []byte {
	b := writeBuffer{Bytes: make([]byte, sizeofSocketRequest)}
	b.Write(r.Family)
	b.Write(r.Protocol)
	b.Write(r.Ext)
	b.Write(r.pad)
	native.PutUint32(b.Next(4), r.States)
	networkOrder.PutUint16(b.Next(2), r.ID.SourcePort)
	networkOrder.PutUint16(b.Next(2), r.ID.DestinationPort)
	if r.Family == syscall.AF_INET6 {
		copy(b.Next(16), r.ID.Source)
		copy(b.Next(16), r.ID.Destination)
	} else {
		copy(b.Next(4), r.ID.Source.To4())
		b.Next(12)
		copy(b.Next(4), r.ID.Destination.To4())
		b.Next(12)
	}
	native.PutUint32(b.Next(4), r.ID.Interface)
	native.PutUint32(b.Next(4), r.ID.Cookie[0])
	native.PutUint32(b.Next(4), r.ID.Cookie[1])
	return b.Bytes
}

// Len returns the size of a socket request
func (r *SocketRequest) Len() int { return sizeofSocketRequest }
+
// readBuffer is a forward-only cursor over a byte slice, used to
// deserialize netlink responses field by field.
type readBuffer struct {
	Bytes []byte
	pos   int
}

// Read returns the byte at the current position and advances past it.
func (b *readBuffer) Read() byte {
	out := b.Bytes[b.pos]
	b.pos++
	return out
}

// Next consumes and returns the next n bytes, advancing the position
// past them.
func (b *readBuffer) Next(n int) []byte {
	start := b.pos
	b.pos = start + n
	return b.Bytes[start:b.pos]
}
+
// deserialize parses a kernel socket-diag response record into s.
// The layout mirrors SocketRequest.Serialize(): ports in network byte
// order, everything else native, 16-byte address slots. Returns an error
// if b is shorter than sizeofSocket.
func (s *Socket) deserialize(b []byte) error {
	if len(b) < sizeofSocket {
		return fmt.Errorf("socket data short read (%d); want %d", len(b), sizeofSocket)
	}
	rb := readBuffer{Bytes: b}
	s.Family = rb.Read()
	s.State = rb.Read()
	s.Timer = rb.Read()
	s.Retrans = rb.Read()
	s.ID.SourcePort = networkOrder.Uint16(rb.Next(2))
	s.ID.DestinationPort = networkOrder.Uint16(rb.Next(2))
	if s.Family == syscall.AF_INET6 {
		s.ID.Source = net.IP(rb.Next(16))
		s.ID.Destination = net.IP(rb.Next(16))
	} else {
		// IPv4: address in the first 4 bytes, 12 bytes of padding
		s.ID.Source = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read())
		rb.Next(12)
		s.ID.Destination = net.IPv4(rb.Read(), rb.Read(), rb.Read(), rb.Read())
		rb.Next(12)
	}
	s.ID.Interface = native.Uint32(rb.Next(4))
	s.ID.Cookie[0] = native.Uint32(rb.Next(4))
	s.ID.Cookie[1] = native.Uint32(rb.Next(4))
	s.Expires = native.Uint32(rb.Next(4))
	s.RQueue = native.Uint32(rb.Next(4))
	s.WQueue = native.Uint32(rb.Next(4))
	s.UID = native.Uint32(rb.Next(4))
	s.INode = native.Uint32(rb.Next(4))
	return nil
}
+
// socketKill kills a connection given its socket ID, by sending a
// SOCK_DESTROY request to the kernel over netlink.
func socketKill(family, proto uint8, sockID SocketID) error {

	sockReq := &SocketRequest{
		Family:   family,
		Protocol: proto,
		ID:       sockID,
	}

	req := nl.NewNetlinkRequest(SOCK_DESTROY, syscall.NLM_F_REQUEST|syscall.NLM_F_ACK)
	req.AddData(sockReq)
	_, err := req.Execute(syscall.NETLINK_INET_DIAG, 0)
	if err != nil {
		return err
	}
	return nil
}
+
// SocketGet returns the list of active connections in the kernel
// filtered by several fields. Currently it returns connections
// filtered by source port and protocol.
func SocketGet(family uint8, proto uint8, srcPort, dstPort uint16, local, remote net.IP) ([]*Socket, error) {
	// only the source port and "any cookie" are set in the request; the
	// rest of the filtering happens in the callers.
	_Id := SocketID{
		SourcePort: srcPort,
		Cookie:     [2]uint32{nl.TCPDIAG_NOCOOKIE, nl.TCPDIAG_NOCOOKIE},
	}

	sockReq := &SocketRequest{
		Family:   family,
		Protocol: proto,
		States:   TCP_ALL,
		ID:       _Id,
	}

	return netlinkRequest(sockReq, family, proto, srcPort, dstPort, local, remote)
}
+
// SocketsDump returns the list of all connections from the kernel
// for the given family and protocol, with no additional filtering.
func SocketsDump(family uint8, proto uint8) ([]*Socket, error) {
	sockReq := &SocketRequest{
		Family:   family,
		Protocol: proto,
		States:   TCP_ALL,
	}
	return netlinkRequest(sockReq, 0, 0, 0, 0, nil, nil)
}
+
// netlinkRequest executes a SOCK_DIAG_BY_FAMILY dump request and parses
// the responses into Socket entries. Entries that fail to deserialize or
// have no inode are skipped; the remaining parameters are only used for
// the error log messages. Results are prepended, so the returned list is
// in reverse kernel order.
func netlinkRequest(sockReq *SocketRequest, family uint8, proto uint8, srcPort, dstPort uint16, local, remote net.IP) ([]*Socket, error) {
	req := nl.NewNetlinkRequest(nl.SOCK_DIAG_BY_FAMILY, syscall.NLM_F_DUMP)
	req.AddData(sockReq)
	msgs, err := req.Execute(syscall.NETLINK_INET_DIAG, 0)
	if err != nil {
		return nil, err
	}
	if len(msgs) == 0 {
		return nil, errors.New("Warning, no message nor error from netlink, or no connections found")
	}
	var sock []*Socket
	for n, m := range msgs {
		s := &Socket{}
		if err = s.deserialize(m); err != nil {
			log.Error("[%d] netlink socket error: %s, %d:%v -> %v:%d - %d:%v -> %v:%d",
				n, TCPStatesMap[s.State],
				srcPort, local, remote, dstPort,
				s.ID.SourcePort, s.ID.Source, s.ID.Destination, s.ID.DestinationPort)
			continue
		}
		if s.INode == 0 {
			continue
		}

		sock = append([]*Socket{s}, sock...)
	}
	// err is nil here: deserialize failures are logged and skipped
	return sock, err
}
diff --git a/daemon/netlink/socket_test.go b/daemon/netlink/socket_test.go
new file mode 100644
index 0000000..b37719b
--- /dev/null
+++ b/daemon/netlink/socket_test.go
@@ -0,0 +1,116 @@
+package netlink
+
+import (
+ "fmt"
+ "net"
+ "os"
+ "strconv"
+ "strings"
+ "testing"
+)
+
// Connection bundles everything created by setupConnection: the parsed
// local/remote endpoints plus the live dialing side (OutConn) and the
// listening side (Listener), so tests can close both when done.
type Connection struct {
	SrcIP    net.IP       // local address of the dialed connection
	DstIP    net.IP       // remote (listener) address
	Protocol string       // e.g. "tcp"
	SrcPort  uint         // local port of the dialed connection
	DstPort  uint         // remote (listener) port
	OutConn  net.Conn     // client side, must be closed by the caller
	Listener net.Listener // server side, must be closed by the caller
}
+
+func EstablishConnection(proto, dst string) (net.Conn, error) {
+ c, err := net.Dial(proto, dst)
+ if err != nil {
+ fmt.Println(err)
+ return nil, err
+ }
+ return c, nil
+}
+
// ListenOnPort opens a listening socket for the given protocol on the given
// "host:port" address. Errors are printed to stdout and returned.
// TODO: UDP -> ListenUDP() or ListenPacket()
func ListenOnPort(proto, port string) (net.Listener, error) {
	listener, err := net.Listen(proto, port)
	if err != nil {
		fmt.Println(err)
	}
	return listener, err
}
+
+func setupConnection(proto string, connChan chan *Connection) {
+ listnr, _ := ListenOnPort(proto, "127.0.0.1:55555")
+ conn, err := EstablishConnection(proto, "127.0.0.1:55555")
+ if err != nil {
+ connChan <- nil
+ return
+ }
+ laddr := strings.Split(conn.LocalAddr().String(), ":")
+ daddr := strings.Split(conn.RemoteAddr().String(), ":")
+ sport, _ := strconv.Atoi(laddr[1])
+ dport, _ := strconv.Atoi(daddr[1])
+
+ lconn := &Connection{
+ SrcPort: uint(sport),
+ DstPort: uint(dport),
+ SrcIP: net.ParseIP(laddr[0]),
+ DstIP: net.ParseIP(daddr[0]),
+ Protocol: "tcp",
+ Listener: listnr,
+ OutConn: conn,
+ }
+ connChan <- lconn
+}
+
+// TestNetlinkQueries tests queries to the kernel to get the inode of a connection.
+// When using ProcFS as monitor method, we need that value to get the PID of an application.
+// We also need it if for any reason auditd or ebpf doesn't return the PID of the application.
+// TODO: test all the cases described in the GetSocketInfo() description.
+func TestNetlinkTCPQueries(t *testing.T) {
+ // netlink tests disabled by default, they cause random failures on restricted
+ // environments.
+ if os.Getenv("NETLINK_TESTS") == "" {
+ t.Skip("Skipping netlink tests. Use NETLINK_TESTS=1 to launch these tests.")
+ }
+
+ connChan := make(chan *Connection)
+ go setupConnection("tcp", connChan)
+ conn := <-connChan
+ if conn == nil {
+ t.Error("TestParseTCPConnection, conn nil")
+ }
+
+ var inodes []int
+ uid := -1
+ t.Run("Test GetSocketInfo", func(t *testing.T) {
+ uid, inodes = GetSocketInfo("tcp", conn.SrcIP, conn.SrcPort, conn.DstIP, conn.DstPort)
+
+ if len(inodes) == 0 {
+ t.Error("inodes empty")
+ }
+ if uid != os.Getuid() {
+ t.Error("GetSocketInfo UID error:", uid, os.Getuid())
+ }
+ })
+
+ t.Run("Test GetSocketInfoByInode", func(t *testing.T) {
+ socket, err := GetSocketInfoByInode(fmt.Sprint(inodes[0]))
+ if err != nil {
+ t.Error("GetSocketInfoByInode error:", err)
+ }
+ if socket == nil {
+ t.Error("GetSocketInfoByInode inode not found")
+ }
+ if socket.ID.SourcePort != uint16(conn.SrcPort) {
+ t.Error("GetSocketInfoByInode dstPort error:", socket)
+ }
+ if socket.ID.DestinationPort != uint16(conn.DstPort) {
+ t.Error("GetSocketInfoByInode dstPort error:", socket)
+ }
+ if socket.UID != uint32(os.Getuid()) {
+ t.Error("GetSocketInfoByInode UID error:", socket, os.Getuid())
+ }
+ })
+
+ conn.Listener.Close()
+}
diff --git a/daemon/netstat/entry.go b/daemon/netstat/entry.go
new file mode 100644
index 0000000..6214a00
--- /dev/null
+++ b/daemon/netstat/entry.go
@@ -0,0 +1,32 @@
+package netstat
+
+import (
+ "net"
+)
+
// Entry holds the information of a /proc/net/* entry.
// For example, /proc/net/tcp:
// sl local_address rem_address st tx_queue rx_queue tr tm->when retrnsmt uid timeout inode
// 0: 0100007F:13AD 00000000:0000 0A 00000000:00000000 00:00000000 00000000 1000 0 18083222
type Entry struct {
	Proto   string // protocol table the entry came from ("tcp", "udp6", ...)
	SrcIP   net.IP // local address
	SrcPort uint   // local port
	DstIP   net.IP // remote address
	DstPort uint   // remote port
	UserId  int    // uid column; -1 when unknown (see FindEntry)
	INode   int    // inode column; -1 when unknown (see FindEntry)
}
+
+// NewEntry creates a new entry with values from /proc/net/
+func NewEntry(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPort uint, userId int, iNode int) Entry {
+ return Entry{
+ Proto: proto,
+ SrcIP: srcIP,
+ SrcPort: srcPort,
+ DstIP: dstIP,
+ DstPort: dstPort,
+ UserId: userId,
+ INode: iNode,
+ }
+}
diff --git a/daemon/netstat/find.go b/daemon/netstat/find.go
new file mode 100644
index 0000000..8560c65
--- /dev/null
+++ b/daemon/netstat/find.go
@@ -0,0 +1,51 @@
+package netstat
+
+import (
+ "net"
+ "strings"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+// FindEntry looks for the connection in the list of known connections in ProcFS.
+func FindEntry(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPort uint) *Entry {
+ if entry := findEntryForProtocol(proto, srcIP, srcPort, dstIP, dstPort); entry != nil {
+ return entry
+ }
+
+ ipv6Suffix := "6"
+ if core.IPv6Enabled && strings.HasSuffix(proto, ipv6Suffix) == false {
+ otherProto := proto + ipv6Suffix
+ log.Debug("Searching for %s netstat entry instead of %s", otherProto, proto)
+ if entry := findEntryForProtocol(otherProto, srcIP, srcPort, dstIP, dstPort); entry != nil {
+ return entry
+ }
+ }
+
+ return &Entry{
+ Proto: proto,
+ SrcIP: srcIP,
+ SrcPort: srcPort,
+ DstIP: dstIP,
+ DstPort: dstPort,
+ UserId: -1,
+ INode: -1,
+ }
+}
+
+func findEntryForProtocol(proto string, srcIP net.IP, srcPort uint, dstIP net.IP, dstPort uint) *Entry {
+ entries, err := Parse(proto)
+ if err != nil {
+ log.Warning("Error while searching for %s netstat entry: %s", proto, err)
+ return nil
+ }
+
+ for _, entry := range entries {
+ if srcIP.Equal(entry.SrcIP) && srcPort == entry.SrcPort && dstIP.Equal(entry.DstIP) && dstPort == entry.DstPort {
+ return &entry
+ }
+ }
+
+ return nil
+}
diff --git a/daemon/netstat/parse.go b/daemon/netstat/parse.go
new file mode 100644
index 0000000..7d82279
--- /dev/null
+++ b/daemon/netstat/parse.go
@@ -0,0 +1,120 @@
+package netstat
+
+import (
+ "bufio"
+ "encoding/binary"
+ "fmt"
+ "net"
+ "os"
+ "regexp"
+ "strconv"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
var (
	// parser matches one data row of a /proc/net/{tcp,udp,...} table and
	// captures, in order: local address, local port, remote address, remote
	// port, uid and inode. Addresses are hex-encoded (8 chars for IPv4,
	// 32 for IPv6); ports are 4 hex chars.
	parser = regexp.MustCompile(`(?i)` +
		`\d+:\s+` + // sl
		`([a-f0-9]{8,32}):([a-f0-9]{4})\s+` + // local_address
		`([a-f0-9]{8,32}):([a-f0-9]{4})\s+` + // rem_address
		`[a-f0-9]{2}\s+` + // st
		`[a-f0-9]{8}:[a-f0-9]{8}\s+` + // tx_queue rx_queue
		`[a-f0-9]{2}:[a-f0-9]{8}\s+` + // tr tm->when
		`[a-f0-9]{8}\s+` + // retrnsmt
		`(\d+)\s+` + // uid
		`\d+\s+` + // timeout
		`(\d+)\s+` + // inode
		`.+`) // stuff we don't care about
)
+
+func decToInt(n string) int {
+ d, err := strconv.ParseInt(n, 10, 64)
+ if err != nil {
+ log.Fatal("Error while parsing %s to int: %s", n, err)
+ }
+ return int(d)
+}
+
+func hexToInt(h string) uint {
+ d, err := strconv.ParseUint(h, 16, 64)
+ if err != nil {
+ log.Fatal("Error while parsing %s to int: %s", h, err)
+ }
+ return uint(d)
+}
+
+func hexToInt2(h string) (uint, uint) {
+ if len(h) > 16 {
+ d, err := strconv.ParseUint(h[:16], 16, 64)
+ if err != nil {
+ log.Fatal("Error while parsing %s to int: %s", h[16:], err)
+ }
+ d2, err := strconv.ParseUint(h[16:], 16, 64)
+ if err != nil {
+ log.Fatal("Error while parsing %s to int: %s", h[16:], err)
+ }
+ return uint(d), uint(d2)
+ }
+
+ d, err := strconv.ParseUint(h, 16, 64)
+ if err != nil {
+ log.Fatal("Error while parsing %s to int: %s", h[16:], err)
+ }
+ return uint(d), 0
+}
+
// hexToIP converts a hex-encoded /proc/net address into a net.IP.
// A second non-zero half from hexToInt2 means the address was 32 hex chars,
// i.e. IPv6; otherwise it's treated as IPv4.
// NOTE(review): n and m are uint, so the >>32 shifts assume a 64-bit platform
// — on 32-bit builds uint is 32 bits and the IPv6 path would be wrong; confirm.
func hexToIP(h string) net.IP {
	n, m := hexToInt2(h)
	var ip net.IP
	if m != 0 {
		ip = make(net.IP, 16)
		// TODO: Check if this depends on machine endianness?
		binary.LittleEndian.PutUint32(ip, uint32(n>>32))
		binary.LittleEndian.PutUint32(ip[4:], uint32(n))
		binary.LittleEndian.PutUint32(ip[8:], uint32(m>>32))
		binary.LittleEndian.PutUint32(ip[12:], uint32(m))
	} else {
		ip = make(net.IP, 4)
		binary.LittleEndian.PutUint32(ip, uint32(n))
	}
	return ip
}
+
+// Parse scans and retrieves the opened connections, from /proc/net/ files
+func Parse(proto string) ([]Entry, error) {
+ filename := fmt.Sprintf("/proc/net/%s", proto)
+ fd, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer fd.Close()
+
+ entries := make([]Entry, 0)
+ scanner := bufio.NewScanner(fd)
+ for lineno := 0; scanner.Scan(); lineno++ {
+ // skip column names
+ if lineno == 0 {
+ continue
+ }
+
+ line := core.Trim(scanner.Text())
+ m := parser.FindStringSubmatch(line)
+ if m == nil {
+ log.Warning("Could not parse netstat line from %s: %s", filename, line)
+ continue
+ }
+
+ entries = append(entries, NewEntry(
+ proto,
+ hexToIP(m[1]),
+ hexToInt(m[2]),
+ hexToIP(m[3]),
+ hexToInt(m[4]),
+ decToInt(m[5]),
+ decToInt(m[6]),
+ ))
+ }
+
+ return entries, nil
+}
diff --git a/daemon/opensnitch.spec b/daemon/opensnitch.spec
new file mode 100644
index 0000000..00d9062
--- /dev/null
+++ b/daemon/opensnitch.spec
@@ -0,0 +1,97 @@
+Name: opensnitch
+Version: 1.5.8
+Release: 1%{?dist}
+Summary: OpenSnitch is a GNU/Linux application firewall
+
+License: GPLv3+
+URL: https://github.com/evilsocket/%{name}
+Source0: https://github.com/evilsocket/%{name}/releases/download/v%{version}/%{name}_%{version}.orig.tar.gz
+#BuildArch: x86_64
+
+#BuildRequires: godep
+Requires(post): info
+Requires(preun): info
+
+%description
+Whenever a program makes a connection, it'll prompt the user to allow or deny
+it.
+
The user can decide whether to block the outgoing connection based on properties
of the connection: by port, by uid, by dst ip, by program or a combination
of them.

These rules can last forever, until the application restarts, or just one time.
+
+The GUI allows the user to view live outgoing connections, as well as search
+by process, user, host or port.
+
+%prep
+rm -rf %{buildroot}
+
+%setup
+
+%build
+mkdir -p go/src/github.com/evilsocket
+ln -s $(pwd) go/src/github.com/evilsocket/opensnitch
+export GOPATH=$(pwd)/go
+cd go/src/github.com/evilsocket/opensnitch/
+make protocol
+cd go/src/github.com/evilsocket/opensnitch/daemon/
+go mod vendor
+go build -o opensnitchd .
+
+%install
+mkdir -p %{buildroot}/usr/bin/ %{buildroot}/usr/lib/systemd/system/ %{buildroot}/etc/opensnitchd/rules %{buildroot}/etc/logrotate.d
+sed -i 's/\/usr\/local/\/usr/' daemon/opensnitchd.service
+install -m 755 daemon/opensnitchd %{buildroot}/usr/bin/opensnitchd
+install -m 644 daemon/opensnitchd.service %{buildroot}/usr/lib/systemd/system/opensnitch.service
+install -m 644 debian/opensnitch.logrotate %{buildroot}/etc/logrotate.d/opensnitch
+
+B=""
+if [ -f /etc/opensnitchd/default-config.json ]; then
+ B="-b"
+fi
+install -m 644 -b $B daemon/default-config.json %{buildroot}/etc/opensnitchd/default-config.json
+
+B=""
+if [ -f /etc/opensnitchd/system-fw.json ]; then
+ B="-b"
+fi
+install -m 644 -b $B daemon/system-fw.json %{buildroot}/etc/opensnitchd/system-fw.json
+
+install -m 644 ebpf_prog/opensnitch.o %{buildroot}/etc/opensnitchd/opensnitch.o
+
+# upgrade, uninstall
+%preun
+systemctl stop opensnitch.service || true
+
+%post
+if [ $1 -eq 1 ]; then
+ systemctl enable opensnitch.service
+fi
+systemctl start opensnitch.service
+
+# uninstall,upgrade
+%postun
+if [ $1 -eq 0 ]; then
+ systemctl disable opensnitch.service
+fi
+if [ $1 -eq 0 -a -f /etc/logrotate.d/opensnitch ]; then
+ rm /etc/logrotate.d/opensnitch
+fi
+
+# postun is the last step after reinstalling
+if [ $1 -eq 1 ]; then
+ systemctl start opensnitch.service
+fi
+
+%clean
+rm -rf %{buildroot}
+
+%files
+%{_bindir}/opensnitchd
+/usr/lib/systemd/system/opensnitch.service
+%{_sysconfdir}/opensnitchd/default-config.json
+%{_sysconfdir}/opensnitchd/system-fw.json
+%{_sysconfdir}/opensnitchd/opensnitch.o
+%{_sysconfdir}/logrotate.d/opensnitch
diff --git a/daemon/opensnitchd.service b/daemon/opensnitchd.service
new file mode 100644
index 0000000..6c3e6df
--- /dev/null
+++ b/daemon/opensnitchd.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenSnitch is a GNU/Linux port of the Little Snitch application firewall.
+Documentation=https://github.com/gustavo-iniguez-goya/opensnitch/wiki
+Wants=network.target
+After=network.target
+
+[Service]
+Type=simple
+PermissionsStartOnly=true
+ExecStartPre=/bin/mkdir -p /etc/opensnitchd/rules
+ExecStart=/usr/local/bin/opensnitchd -rules-path /etc/opensnitchd/rules
+Restart=always
+RestartSec=30
+
+[Install]
+WantedBy=multi-user.target
diff --git a/daemon/procmon/activepids.go b/daemon/procmon/activepids.go
new file mode 100644
index 0000000..92e7643
--- /dev/null
+++ b/daemon/procmon/activepids.go
@@ -0,0 +1,89 @@
+package procmon
+
+import (
+ "fmt"
+ "io/ioutil"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
// value is what activePids stores per PID: the Process object plus the
// starttime that disambiguates PID reuse.
type value struct {
	Process *Process
	//Starttime uniquely identifies a process, it is the 22nd value in /proc/<pid>/stat
	//if another process starts with the same PID, its Starttime will be unique
	Starttime uint64
}

var (
	// activePids maps PID -> value for processes known to be running.
	activePids = make(map[uint64]value)
	// activePidsLock guards all access to activePids.
	activePidsLock = sync.RWMutex{}
)
+
+//MonitorActivePids checks that each process in activePids
+//is still running and if not running (or another process with the same pid is running),
+//removes the pid from activePids
+func MonitorActivePids() {
+ for {
+ time.Sleep(time.Second)
+ activePidsLock.Lock()
+ for k, v := range activePids {
+ data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", k))
+ if err != nil {
+ //file does not exists, pid has quit
+ delete(activePids, k)
+ pidsCache.delete(int(k))
+ continue
+ }
+ startTime, err := strconv.ParseInt(strings.Split(string(data), " ")[21], 10, 64)
+ if err != nil {
+ log.Error("Could not find or convert Starttime. This should never happen. Please report this incident to the Opensnitch developers: %v", err)
+ delete(activePids, k)
+ pidsCache.delete(int(k))
+ continue
+ }
+ if uint64(startTime) != v.Starttime {
+ //extremely unlikely: the original process has quit and another process
+ //was started with the same PID - all this in less than 1 second
+ log.Error("Same PID but different Starttime. Please report this incident to the Opensnitch developers.")
+ delete(activePids, k)
+ pidsCache.delete(int(k))
+ continue
+ }
+ }
+ activePidsLock.Unlock()
+ }
+}
+
+func findProcessInActivePidsCache(pid uint64) *Process {
+ activePidsLock.Lock()
+ defer activePidsLock.Unlock()
+ if value, ok := activePids[pid]; ok {
+ return value.Process
+ }
+ return nil
+}
+
+func addToActivePidsCache(pid uint64, proc *Process) {
+
+ data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/stat", pid))
+ if err != nil {
+ //most likely the process has quit by now
+ return
+ }
+ startTime, err2 := strconv.ParseInt(strings.Split(string(data), " ")[21], 10, 64)
+ if err2 != nil {
+ log.Error("Could not find or convert Starttime. This should never happen. Please report this incident to the Opensnitch developers: %v", err)
+ return
+ }
+
+ activePidsLock.Lock()
+ activePids[pid] = value{
+ Process: proc,
+ Starttime: uint64(startTime),
+ }
+ activePidsLock.Unlock()
+}
diff --git a/daemon/procmon/activepids_test.go b/daemon/procmon/activepids_test.go
new file mode 100644
index 0000000..88ca598
--- /dev/null
+++ b/daemon/procmon/activepids_test.go
@@ -0,0 +1,104 @@
+package procmon
+
+import (
+ "fmt"
+ "math/rand"
+ "os"
+ "os/exec"
+ "syscall"
+ "testing"
+ "time"
+)
+
//TestMonitorActivePids starts helper processes, adds them to activePids
//and then kills them and checks if monitorActivePids() removed the killed processes
//from activePids
func TestMonitorActivePids(t *testing.T) {

	if os.Getenv("helperBinaryMode") == "on" {
		//we are in the "helper binary" mode, we were started with helperCmd.Start() (see below)
		//do nothing, just wait to be killed
		time.Sleep(time.Second * 10)
		os.Exit(1) //will never get here; but keep it here just in case
	}

	//we are in a normal "go test" mode
	tmpDir := "/tmp/ostest_" + randString()
	os.Mkdir(tmpDir, 0777)
	fmt.Println("tmp dir", tmpDir)
	defer os.RemoveAll(tmpDir)

	go MonitorActivePids()

	//build a "helper binary" with "go test -c -o /tmp/path" and put it into a tmp dir
	//the helper re-runs this very test with helperBinaryMode=on (see top of function)
	helperBinaryPath := tmpDir + "/helper1"
	goExecutable, _ := exec.LookPath("go")
	cmd := exec.Command(goExecutable, "test", "-c", "-o", helperBinaryPath)
	if err := cmd.Run(); err != nil {
		t.Error("Error running go test -c", err)
	}

	var numberOfHelpers = 5
	var helperProcs []*Process
	//start helper binaries
	for i := 0; i < numberOfHelpers; i++ {
		var helperCmd *exec.Cmd
		helperCmd = &exec.Cmd{
			Path: helperBinaryPath,
			Args: []string{helperBinaryPath},
			Env:  []string{"helperBinaryMode=on"},
		}
		if err := helperCmd.Start(); err != nil {
			t.Error("Error starting helper binary", err)
		}
		go func() {
			helperCmd.Wait() //must Wait(), otherwise the helper process becomes a zombie when kill()ed
		}()

		pid := helperCmd.Process.Pid
		proc := NewProcess(pid, helperBinaryPath)
		helperProcs = append(helperProcs, proc)
		addToActivePidsCache(uint64(pid), proc)
	}
	//sleep to make sure all processes started before we proceed
	time.Sleep(time.Second * 1)
	//make sure all PIDS are in the cache
	for i := 0; i < numberOfHelpers; i++ {
		proc := helperProcs[i]
		pid := proc.ID
		foundProc := findProcessInActivePidsCache(uint64(pid))
		if foundProc == nil {
			t.Error("PID not found among active processes", pid)
		}
		if proc.Path != foundProc.Path || proc.ID != foundProc.ID {
			t.Error("PID or path doesn't match with the found process")
		}
	}
	//kill all helpers except for one
	for i := 0; i < numberOfHelpers-1; i++ {
		if err := syscall.Kill(helperProcs[i].ID, syscall.SIGTERM); err != nil {
			t.Error("error in syscall.Kill", err)
		}
	}
	//give the cache time to remove killed processes
	//MonitorActivePids polls /proc once per second, so 1s may be tight on slow machines
	time.Sleep(time.Second * 1)

	//make sure only the alive process is in the cache
	foundProc := findProcessInActivePidsCache(uint64(helperProcs[numberOfHelpers-1].ID))
	if foundProc == nil {
		t.Error("last alive PID is not found among active processes", foundProc)
	}
	if len(activePids) != 1 {
		t.Error("more than 1 active PIDs left in cache")
	}
}
+
// randString returns a 10-character random string of ASCII letters,
// used to build unique temporary directory names.
func randString() string {
	// use a local generator instead of reseeding the shared global source on
	// every call (rand.Seed mutates global state and makes concurrent callers race)
	rnd := rand.New(rand.NewSource(time.Now().UnixNano()))
	letterRunes := []rune("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ")
	b := make([]rune, 10)
	for i := range b {
		b[i] = letterRunes[rnd.Intn(len(letterRunes))]
	}
	return string(b)
}
diff --git a/daemon/procmon/audit/client.go b/daemon/procmon/audit/client.go
new file mode 100644
index 0000000..396cc06
--- /dev/null
+++ b/daemon/procmon/audit/client.go
@@ -0,0 +1,355 @@
+// Package audit reads auditd events from the builtin af_unix plugin, and parses
+// the messages in order to proactively monitor pids which make connections.
+// Once a connection is made and redirected to us via NFQUEUE, we
+// lookup the connection inode in /proc, and add the corresponding PID with all
+// the information of the process to a list of known PIDs.
+//
+// TODO: Prompt the user to allow/deny a connection/program as soon as it's
+// started.
+//
// Requisites:
+// - install auditd and audispd-plugins
+// - enable af_unix plugin /etc/audisp/plugins.d/af_unix.conf (active = yes)
+// - auditctl -a always,exit -F arch=b64 -S socket,connect,execve -k opensnitchd
+// - increase /etc/audisp/audispd.conf q_depth if there're dropped events
+// - set write_logs to no if you don't need/want audit logs to be stored in the disk.
+//
+// read messages from the pipe to verify that it's working:
+// socat unix-connect:/var/run/audispd_events stdio
+//
+// Audit event fields:
+// https://github.com/linux-audit/audit-documentation/blob/master/specs/fields/field-dictionary.csv
+// Record types:
+// https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Security_Guide/sec-Audit_Record_Types.html
+//
+// Documentation:
+// https://github.com/linux-audit/audit-documentation
+package audit
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "net"
+ "os"
+ "runtime"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
// Event represents an audit event, which in our case can be an event of type
// socket, execve, socketpair or connect.
// Field comments name the auditd record key each value is parsed from
// (see populateEvent in parse.go).
type Event struct {
	Timestamp   string // audit(xxxxxxx:nnnn)
	Serial      string // nnnn part of the audit msg id
	ProcName    string // comm
	ProcPath    string // exe
	ProcCmdLine string // proctitle
	ProcDir     string // cwd
	ProcMode    string // mode
	TTY         string
	Pid         int
	UID         int
	Gid         int
	PPid        int
	EUid        int
	EGid        int
	OUid        int
	OGid        int
	UserName    string // auid
	DstHost     net.IP
	DstPort     int
	NetFamily   string // inet, inet6, local
	Success     string
	INode       int
	Dev         string
	Syscall     int
	Exit        int
	EventType   string
	RawEvent    string
	LastSeen    time.Time // updated on every matching audit record; used by cleanOldEvents
}
+
// MaxEventAge is the maximum minutes an audit process can live without network activity.
const (
	MaxEventAge = int(10)
)

var (
	// Lock holds a mutex protecting the events cache.
	Lock sync.RWMutex
	// ourPid is excluded from the audit rules so we don't trace ourselves.
	ourPid = os.Getpid()
	// cache of events
	events        []*Event
	eventsCleaner *time.Ticker
	// eventsCleanerChan signals startEventsCleaner() to stop.
	eventsCleanerChan = (chan bool)(nil)
	// TODO: EventChan is an output channel where incoming auditd events will be written.
	// If a client opens it.
	EventChan = (chan Event)(nil)
	// eventsExitChan signals Reader() to stop.
	eventsExitChan = (chan bool)(nil)
	auditConn      net.Conn
	// TODO: we may need arm arch
	rule64      = []string{"exit,always", "-F", "arch=b64", "-F", fmt.Sprint("ppid!=", ourPid), "-F", fmt.Sprint("pid!=", ourPid), "-S", "socket,connect", "-k", "opensnitch"}
	rule32      = []string{"exit,always", "-F", "arch=b32", "-F", fmt.Sprint("ppid!=", ourPid), "-F", fmt.Sprint("pid!=", ourPid), "-S", "socketcall", "-F", "a0=1", "-k", "opensnitch"}
	audispdPath = "/var/run/audispd_events"
)

// OpensnitchRulesKey is the mark we place on every event we are interested in.
const (
	OpensnitchRulesKey = "key=\"opensnitch\""
)
+
+// GetEvents returns the list of processes which have opened a connection.
+func GetEvents() []*Event {
+ return events
+}
+
+// GetEventByPid returns an event given a pid.
+func GetEventByPid(pid int) *Event {
+ Lock.RLock()
+ defer Lock.RUnlock()
+
+ for _, event := range events {
+ if pid == event.Pid {
+ return event
+ }
+ }
+
+ return nil
+}
+
+// sortEvents sorts received events by time and elapsed time since latest network activity.
+// newest PIDs will be placed on top of the list.
+func sortEvents() {
+ sort.Slice(events, func(i, j int) bool {
+ now := time.Now()
+ elapsedTimeT := now.Sub(events[i].LastSeen)
+ elapsedTimeU := now.Sub(events[j].LastSeen)
+ t := events[i].LastSeen.UnixNano()
+ u := events[j].LastSeen.UnixNano()
+ return t > u && elapsedTimeT < elapsedTimeU
+ })
+}
+
+// cleanOldEvents deletes the PIDs which do not exist or that are too old to
+// live.
+// We start searching from the oldest to the newest.
+// If the last network activity of a PID has been greater than MaxEventAge,
+// then it'll be deleted.
+func cleanOldEvents() {
+ Lock.Lock()
+ defer Lock.Unlock()
+
+ for n := len(events) - 1; n >= 0; n-- {
+ now := time.Now()
+ elapsedTime := now.Sub(events[n].LastSeen)
+ if int(elapsedTime.Minutes()) >= MaxEventAge {
+ events = append(events[:n], events[n+1:]...)
+ continue
+ }
+ if core.Exists(fmt.Sprint("/proc/", events[n].Pid)) == false {
+ events = append(events[:n], events[n+1:]...)
+ }
+ }
+}
+
+func deleteEvent(pid int) {
+ for n := range events {
+ if events[n].Pid == pid || events[n].PPid == pid {
+ deleteEventByIndex(n)
+ break
+ }
+ }
+}
+
+func deleteEventByIndex(index int) {
+ Lock.Lock()
+ events = append(events[:index], events[index+1:]...)
+ Lock.Unlock()
+}
+
// AddEvent adds new event to the list of PIDs which have generate network
// activity.
// If the PID is already in the list, the LastSeen field is updated, to keep
// it alive.
func AddEvent(aevent *Event) {
	if aevent == nil {
		return
	}
	Lock.Lock()
	defer Lock.Unlock()

	// update the existing entry for this pid+syscall pair instead of duplicating it
	for n := 0; n < len(events); n++ {
		if events[n].Pid == aevent.Pid && events[n].Syscall == aevent.Syscall {
			// NOTE(review): the stored event is replaced when the new one has a
			// non-empty cmdline OR when both cmdlines are equal (including both
			// empty); a new event with a *different* empty cmdline only refreshes
			// LastSeen — confirm this merge rule is intended.
			if aevent.ProcCmdLine != "" || (aevent.ProcCmdLine == events[n].ProcCmdLine) {
				events[n] = aevent
			}
			events[n].LastSeen = time.Now()

			sortEvents()
			return
		}
	}
	aevent.LastSeen = time.Now()
	// new events are prepended so the newest PID is first in the list
	events = append([]*Event{aevent}, events...)
}
+
+// startEventsCleaner will review if the events in the cache need to be cleaned
+// every 5 minutes.
+func startEventsCleaner() {
+ for {
+ select {
+ case <-eventsCleanerChan:
+ goto Exit
+ case <-eventsCleaner.C:
+ cleanOldEvents()
+ }
+ }
+Exit:
+ log.Debug("audit: cleanerRoutine stopped")
+}
+
+func addRules() bool {
+ r64 := append([]string{"-A"}, rule64...)
+ r32 := append([]string{"-A"}, rule32...)
+ _, err64 := core.Exec("auditctl", r64)
+ _, err32 := core.Exec("auditctl", r32)
+ if err64 == nil && err32 == nil {
+ return true
+ }
+ log.Error("Error adding audit rule, err32=%v, err=%v", err32, err64)
+ return false
+}
+
+func configureSyscalls() {
+ // XXX: what about a i386 process running on a x86_64 system?
+ if runtime.GOARCH == "386" {
+ syscallSOCKET = "1"
+ syscallCONNECT = "3"
+ syscallSOCKETPAIR = "8"
+ }
+}
+
+func deleteRules() bool {
+ r64 := []string{"-D", "-k", "opensnitch"}
+ r32 := []string{"-D", "-k", "opensnitch"}
+ _, err64 := core.Exec("auditctl", r64)
+ _, err32 := core.Exec("auditctl", r32)
+ if err64 == nil && err32 == nil {
+ return true
+ }
+ log.Error("Error deleting audit rules, err32=%v, err64=%v", err32, err64)
+ return false
+}
+
// checkRules verifies that our audit rules are still installed.
// TODO: not implemented yet, always reports success.
func checkRules() bool {
	// TODO
	return true
}

// checkStatus verifies that auditd is running and usable.
// TODO: not implemented yet, always reports success.
func checkStatus() bool {
	// TODO
	return true
}
+
// Reader reads events from audisd af_unix pipe plugin.
// If the auditd daemon is stopped or restarted, the reader handle
// is closed, so we need to restablished the connection.
// It loops until eventsExitChan is signaled, handing every line to parseEvent.
func Reader(r io.Reader, eventChan chan<- Event) {
	if r == nil {
		log.Error("Error reading auditd events. Is auditd running? is af_unix plugin enabled?")
		return
	}
	reader := bufio.NewReader(r)
	go startEventsCleaner()

	for {
		select {
		case <-eventsExitChan:
			goto Exit
		default:
			buf, _, err := reader.ReadLine()
			if err != nil {
				if err == io.EOF {
					// auditd went away: reconnect() removes the rules, waits
					// 30s and re-dials the audisp socket
					log.Error("AuditReader: auditd stopped, reconnecting in 30s %s", err)
					if newReader, err := reconnect(); err == nil {
						reader = bufio.NewReader(newReader)
						log.Important("Auditd reconnected, continue reading")
					}
					continue
				}
				// NOTE(review): this "break" only exits the select, not the
				// for loop, so a persistent non-EOF error causes a tight
				// log-and-retry loop — confirm whether exiting the loop
				// (goto Exit) was intended here.
				log.Warning("AuditReader: auditd error %s", err)
				break
			}

			parseEvent(string(buf[0:len(buf)]), eventChan)
		}
	}
Exit:
	log.Debug("audit.Reader() closed")
}
+
+// StartChannel creates a channel to receive events from Audit.
+// Launch audit.Reader() in a goroutine:
+// go audit.Reader(c, (chan<- audit.Event)(audit.EventChan))
+func StartChannel() {
+ EventChan = make(chan Event, 0)
+}
+
+func reconnect() (net.Conn, error) {
+ deleteRules()
+ time.Sleep(30 * time.Second)
+ return connect()
+}
+
+func connect() (net.Conn, error) {
+ addRules()
+ // TODO: make the unix socket path configurable
+ return net.Dial("unix", audispdPath)
+}
+
// Stop stops listening for events from auditd and delete the auditd rules.
func Stop() {
	// closing the socket first unblocks a Reader stuck in ReadLine()
	if auditConn != nil {
		if err := auditConn.Close(); err != nil {
			log.Warning("audit.Stop() error closing socket: %v", err)
		}
	}

	if eventsCleaner != nil {
		eventsCleaner.Stop()
	}
	// NOTE(review): these sends block until the corresponding goroutine
	// reaches its select; if Reader()/startEventsCleaner() already exited,
	// Stop() would hang here — confirm Stop() is only called while both
	// goroutines are alive.
	if eventsExitChan != nil {
		eventsExitChan <- true
		close(eventsExitChan)
	}
	if eventsCleanerChan != nil {
		eventsCleanerChan <- true
		close(eventsCleanerChan)
	}

	deleteRules()
	if EventChan != nil {
		close(EventChan)
	}
}
+
+// Start makes a new connection to the audisp af_unix socket.
+func Start() (net.Conn, error) {
+ auditConn, err := connect()
+ if err != nil {
+ log.Error("auditd Start() connection error %v", err)
+ deleteRules()
+ return nil, err
+ }
+
+ configureSyscalls()
+ eventsCleaner = time.NewTicker(time.Minute * 5)
+ eventsCleanerChan = make(chan bool)
+ eventsExitChan = make(chan bool)
+ return auditConn, err
+}
diff --git a/daemon/procmon/audit/parse.go b/daemon/procmon/audit/parse.go
new file mode 100644
index 0000000..a666588
--- /dev/null
+++ b/daemon/procmon/audit/parse.go
@@ -0,0 +1,298 @@
+package audit
+
+import (
+ "encoding/hex"
+ "fmt"
+ "net"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
var (
	// newEvent is true while we're inside a multi-record audit event set
	// (between the syscall record and the matching EOE record).
	newEvent = false
	// netEvent is the event currently being assembled by parseEvent.
	netEvent = &Event{}

	// RegExp for parse audit messages
	// https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/security_guide/sec-understanding_audit_log_files
	auditRE, _ = regexp.Compile(`([a-zA-Z0-9\-_]+)=([a-zA-Z0-9:'\-\/\"\.\,_\(\)]+)`)
	rawEvent   = make(map[string]string)
)

// amd64 syscalls definition
// if the platform is not amd64, it's redefined on Start()
var (
	syscallSOCKET     = "41"
	syscallCONNECT    = "42"
	syscallSOCKETPAIR = "53"
	syscallEXECVE     = "59"
	syscallSOCKETCALL = "102"
)

// /usr/include/x86_64-linux-gnu/bits/socket_type.h
const (
	sockSTREAM    = "1"
	sockDGRAM     = "2"
	sockRAW       = "3"
	sockSEQPACKET = "5"
	sockPACKET    = "10"

	// /usr/include/x86_64-linux-gnu/bits/socket.h
	pfUNSPEC = "0"
	pfLOCAL  = "1" // PF_UNIX
	pfINET   = "2"
	pfINET6  = "10"

	// /etc/protocols
	protoIP  = "0"
	protoTCP = "6"
	protoUDP = "17"
)

// record type prefixes of the audit messages we parse.
// https://access.redhat.com/documentation/en-US/Red_Hat_Enterprise_Linux/7/html/Security_Guide/sec-Audit_Record_Types.html
const (
	AuditTypePROCTITLE  = "type=PROCTITLE"
	AuditTypeCWD        = "type=CWD"
	AuditTypePATH       = "type=PATH"
	AuditTypeEXECVE     = "type=EXECVE"
	AuditTypeSOCKADDR   = "type=SOCKADDR"
	AuditTypeSOCKETCALL = "type=SOCKETCALL"
	AuditTypeEOE        = "type=EOE"
)

// precomputed "syscall=N" needles used by parseEvent to classify records.
var (
	syscallSOCKETstr     = fmt.Sprint("syscall=", syscallSOCKET)
	syscallCONNECTstr    = fmt.Sprint("syscall=", syscallCONNECT)
	syscallSOCKETPAIRstr = fmt.Sprint("syscall=", syscallSOCKETPAIR)
	syscallEXECVEstr     = fmt.Sprint("syscall=", syscallEXECVE)
	syscallSOCKETCALLstr = fmt.Sprint("syscall=", syscallSOCKETCALL)
)
+
// parseNetLine parses a SOCKADDR message type of the form:
// saddr string: inet6 host:2001:4860:4860::8888 serv:53
//
// For raw hex sockaddr values the layout is:
// 0:4 - type
// 4:8 - port
// 8:16 - ip
func parseNetLine(line string, decode bool) (family string, dstHost net.IP, dstPort int) {

	switch family := line[0:4]; family {
	// local
	// case "0100":
	// ipv4
	case "0200":
		octet2 := decodeString(line[4:8])
		octet := decodeString(line[8:16])
		host := fmt.Sprint(octet[0], ".", octet[1], ".", octet[2], ".", octet[3])
		// NOTE(review): debug output left in — this prints to stdout for
		// every IPv4 SOCKADDR record; consider removing or using log.Debug.
		fmt.Printf("dest ip: %s -- %s:%s\n", line[4:8], octet2, host)
		// ipv6
		//case "0A00":
	}

	if decode == true {
		line = decodeString(line)
	}
	pieces := strings.Split(line, " ")
	family = pieces[0]

	// not an inet/inet6 family (e.g. "local"): nothing more to extract
	if family[:4] != "inet" {
		return family, dstHost, 0
	}

	// NOTE(review): pieces[1][:5] / pieces[2][:5] panic on tokens shorter
	// than 5 characters — this assumes well-formed audisp output; confirm.
	if len(pieces) > 1 && pieces[1][:5] == "host:" {
		dstHost = net.ParseIP(strings.Split(pieces[1], "host:")[1])
	}
	if len(pieces) > 2 && pieces[2][:5] == "serv:" {
		_dstPort, err := strconv.Atoi(strings.Split(line, "serv:")[1])
		if err != nil {
			dstPort = -1
		} else {
			dstPort = _dstPort
		}
	}

	return family, dstHost, dstPort
}
+
// decodeString will try to decode a string encoded in hexadecimal.
// If the string can not be decoded, the original string will be returned.
// In that case, usually it means that it's a non-encoded string.
func decodeString(s string) string {
	raw, err := hex.DecodeString(s)
	if err != nil {
		return s
	}
	return string(raw)
}
+
+// extractFields parsed an audit raw message, and extracts all the fields.
+func extractFields(rawMessage string, newEvent *map[string]string) {
+ Lock.Lock()
+ defer Lock.Unlock()
+
+ if auditRE == nil {
+ newEvent = nil
+ return
+ }
+ fieldList := auditRE.FindAllStringSubmatch(rawMessage, -1)
+ if fieldList == nil {
+ newEvent = nil
+ return
+ }
+ for _, field := range fieldList {
+ (*newEvent)[field[1]] = field[2]
+ }
+}
+
// populateEvent populates our Event from a raw parsed message.
// Each case maps one auditd field key to the corresponding Event field;
// hex-encoded string values are decoded and unquoted first.
func populateEvent(aevent *Event, eventFields *map[string]string) *Event {
	if aevent == nil {
		return nil
	}
	Lock.Lock()
	defer Lock.Unlock()

	for k, v := range *eventFields {
		switch k {
		//case "a0":
		//case "a1":
		//case "a2":
		case "fam":
			// purely local (unix socket) events carry no network info; skip them
			if v == "local" {
				return nil
			}
			aevent.NetFamily = v
		case "lport":
			aevent.DstPort, _ = strconv.Atoi(v)
			// TODO
			/*case "addr":
			    fmt.Println("addr: ", v)
			  case "daddr":
			    fmt.Println("daddr: ", v)
			  case "laddr":
			    aevent.DstHost = net.ParseIP(v)
			  case "saddr":
			    parseNetLine(v, true)
			    fmt.Println("saddr:", v)
			*/
		case "exe":
			aevent.ProcPath = strings.Trim(decodeString(v), "\"")
		case "comm":
			aevent.ProcName = strings.Trim(decodeString(v), "\"")
		// proctitle may be truncated to 128 characters, so don't rely on it, parse /proc/<pid>/ instead
		//case "proctitle":
		//	aevent.ProcCmdLine = strings.Trim(decodeString(v), "\"")
		case "tty":
			aevent.TTY = v
		case "pid":
			aevent.Pid, _ = strconv.Atoi(v)
		case "ppid":
			aevent.PPid, _ = strconv.Atoi(v)
		case "uid":
			aevent.UID, _ = strconv.Atoi(v)
		case "gid":
			aevent.Gid, _ = strconv.Atoi(v)
		case "success":
			aevent.Success = v
		case "cwd":
			aevent.ProcDir = strings.Trim(decodeString(v), "\"")
		case "inode":
			aevent.INode, _ = strconv.Atoi(v)
		case "dev":
			aevent.Dev = v
		case "mode":
			aevent.ProcMode = v
		case "ouid":
			aevent.OUid, _ = strconv.Atoi(v)
		case "ogid":
			aevent.OGid, _ = strconv.Atoi(v)
		case "syscall":
			aevent.Syscall, _ = strconv.Atoi(v)
		case "exit":
			aevent.Exit, _ = strconv.Atoi(v)
		case "type":
			aevent.EventType = v
		case "msg":
			// v looks like "audit(1234567890.123:456)": split into the
			// timestamp and the serial number
			parts := strings.Split(v[6:], ":")
			aevent.Timestamp = parts[0]
			aevent.Serial = parts[1][:len(parts[1])-1]
		}
	}

	return aevent
}
+
+// parseEvent parses an auditd event, discards the unwanted ones, and adds
+// the ones we're interested in to an array.
+// We're only interested in the socket,socketpair,connect and execve syscalls.
+// Events from us are excluded.
+//
+// When we received an event, we parse and add it to the list as soon as we can.
+// If the next messages of the set have additional information, we update the
+// event.
+func parseEvent(rawMessage string, eventChan chan<- Event) {
+ if newEvent == false && strings.Index(rawMessage, OpensnitchRulesKey) == -1 {
+ return
+ }
+
+ aEvent := make(map[string]string)
+ if strings.Index(rawMessage, syscallSOCKETstr) != -1 ||
+ strings.Index(rawMessage, syscallCONNECTstr) != -1 ||
+ strings.Index(rawMessage, syscallSOCKETPAIRstr) != -1 ||
+ strings.Index(rawMessage, syscallEXECVEstr) != -1 ||
+ strings.Index(rawMessage, syscallSOCKETCALLstr) != -1 {
+
+ extractFields(rawMessage, &aEvent)
+ if aEvent == nil {
+ return
+ }
+ newEvent = true
+ netEvent = &Event{}
+ netEvent = populateEvent(netEvent, &aEvent)
+ AddEvent(netEvent)
+ } else if newEvent == true && strings.Index(rawMessage, AuditTypePROCTITLE) != -1 {
+ extractFields(rawMessage, &aEvent)
+ if aEvent == nil {
+ return
+ }
+ netEvent = populateEvent(netEvent, &aEvent)
+ AddEvent(netEvent)
+ } else if newEvent == true && strings.Index(rawMessage, AuditTypeCWD) != -1 {
+ extractFields(rawMessage, &aEvent)
+ if aEvent == nil {
+ return
+ }
+ netEvent = populateEvent(netEvent, &aEvent)
+ AddEvent(netEvent)
+ } else if newEvent == true && strings.Index(rawMessage, AuditTypeEXECVE) != -1 {
+ extractFields(rawMessage, &aEvent)
+ if aEvent == nil {
+ return
+ }
+ netEvent = populateEvent(netEvent, &aEvent)
+ AddEvent(netEvent)
+ } else if newEvent == true && strings.Index(rawMessage, AuditTypePATH) != -1 {
+ extractFields(rawMessage, &aEvent)
+ if aEvent == nil {
+ return
+ }
+ netEvent = populateEvent(netEvent, &aEvent)
+ AddEvent(netEvent)
+ } else if newEvent == true && strings.Index(rawMessage, AuditTypeSOCKADDR) != -1 {
+ extractFields(rawMessage, &aEvent)
+ if aEvent == nil {
+ return
+ }
+
+ netEvent = populateEvent(netEvent, &aEvent)
+ AddEvent(netEvent)
+ if EventChan != nil {
+ eventChan <- *netEvent
+ }
+ } else if newEvent == true && strings.Index(rawMessage, AuditTypeEOE) != -1 {
+ newEvent = false
+ AddEvent(netEvent)
+ if EventChan != nil {
+ eventChan <- *netEvent
+ }
+ }
+}
diff --git a/daemon/procmon/cache.go b/daemon/procmon/cache.go
new file mode 100644
index 0000000..395fc42
--- /dev/null
+++ b/daemon/procmon/cache.go
@@ -0,0 +1,339 @@
+package procmon
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+)
+
+// InodeItem represents an item of the InodesCache.
+type InodeItem struct {
+ sync.RWMutex
+
+ Pid int
+ FdPath string
+ LastSeen int64
+}
+
+// ProcItem represents an item of the pidsCache
+type ProcItem struct {
+ sync.RWMutex
+
+ Pid int
+ FdPath string
+ Descriptors []string
+ LastSeen int64
+}
+
+// CacheProcs holds the cache of processes that have established connections.
+type CacheProcs struct {
+ sync.RWMutex
+ items []*ProcItem
+}
+
+// CacheInodes holds the cache of Inodes.
+// The key is formed as follow:
+// inode+srcip+srcport+dstip+dstport
+type CacheInodes struct {
+ sync.RWMutex
+ items map[string]*InodeItem
+}
+
+var (
+ // cache of inodes, which help to not iterate over all the pidsCache and
+ // descriptors of /proc//fd/
+ // 15-50us vs 50-80ms
+ // we hit this cache when:
+ // - we've blocked a connection and the process retries it several times until it gives up,
+ // - or when a process timeouts connecting to an IP/domain and it retries it again,
+ // - or when a process resolves a domain and then connects to the IP.
+ inodesCache = NewCacheOfInodes()
+ maxTTL = 3 // maximum 3 minutes of inactivity in cache. Really rare, usually they lasts less than a minute.
+
+ // 2nd cache of already known running pids, which also saves time by
+ // iterating only over a few pids' descriptors, (30us-20ms vs. 50-80ms)
+ // since it's more likely that most of the connections will be made by the
+ // same (running) processes.
+ // The cache is ordered by time, placing in the first places those PIDs with
+ // active connections.
+ pidsCache CacheProcs
+ pidsDescriptorsCache = make(map[int][]string)
+
+ cacheTicker = time.NewTicker(2 * time.Minute)
+)
+
+// CacheCleanerTask checks periodically if the inodes in the cache must be removed.
+// It blocks forever, so it's meant to be launched as a goroutine.
+func CacheCleanerTask() {
+	// ticker channels are never closed, so this ranges forever.
+	for range cacheTicker.C {
+		inodesCache.cleanup()
+	}
+}
+
+// NewCacheOfInodes returns a new, empty cache for inodes.
+func NewCacheOfInodes() *CacheInodes {
+	ic := &CacheInodes{}
+	ic.items = make(map[string]*InodeItem)
+	return ic
+}
+
+//******************************************************************************
+// items of the caches.
+
+// updateTime refreshes the item's last-seen timestamp.
+func (i *InodeItem) updateTime() {
+	i.Lock()
+	defer i.Unlock()
+	i.LastSeen = time.Now().UnixNano()
+}
+
+// getTime returns the item's last-seen timestamp (ns since epoch).
+func (i *InodeItem) getTime() int64 {
+	i.RLock()
+	t := i.LastSeen
+	i.RUnlock()
+	return t
+}
+
+// updateTime refreshes the process item's last-seen timestamp.
+func (p *ProcItem) updateTime() {
+	p.Lock()
+	defer p.Unlock()
+	p.LastSeen = time.Now().UnixNano()
+}
+
+// updateDescriptors replaces the item's list of open descriptors.
+func (p *ProcItem) updateDescriptors(descriptors []string) {
+	p.Lock()
+	defer p.Unlock()
+	p.Descriptors = descriptors
+}
+
+//******************************************************************************
+// cache of processes
+
+// add registers a process in the cache, or just refreshes its timestamp
+// when the pid is already present. New entries are prepended, so recently
+// active processes are found first.
+func (c *CacheProcs) add(fdPath string, fdList []string, pid int) {
+	c.Lock()
+	defer c.Unlock()
+
+	for _, it := range c.items {
+		if it == nil {
+			continue
+		}
+		if it.Pid == pid {
+			it.updateTime()
+			return
+		}
+	}
+
+	newItem := &ProcItem{
+		Pid:         pid,
+		FdPath:      fdPath,
+		Descriptors: fdList,
+		LastSeen:    time.Now().UnixNano(),
+	}
+	c.setItems([]*ProcItem{newItem}, c.items)
+}
+
+// sort orders the cache by LastSeen, most recently seen first, so PIDs with
+// active connections are found sooner. If the given pid is already at the
+// top of the list there's nothing to do.
+func (c *CacheProcs) sort(pid int) {
+	item := c.getItem(0)
+	if item != nil && item.Pid == pid {
+		return
+	}
+	// sort.Slice reorders c.items in place, which is a write: holding only
+	// the read lock here would race with concurrent readers of the slice.
+	c.Lock()
+	defer c.Unlock()
+
+	sort.Slice(c.items, func(i, j int) bool {
+		// must be a strict "less" function; returning true for equal
+		// elements (the previous `t > u || t == u`) violates sort's contract.
+		return c.items[i].LastSeen > c.items[j].LastSeen
+	})
+}
+
+// delete removes the given pid from the cache of processes, together with
+// any inodes cached for it.
+func (c *CacheProcs) delete(pid int) {
+	c.Lock()
+	defer c.Unlock()
+
+	for idx, it := range c.items {
+		if it.Pid != pid {
+			continue
+		}
+		c.deleteItem(idx)
+		inodesCache.delete(pid)
+		break
+	}
+}
+
+// deleteItem removes the item at the given position of the list.
+// Out-of-range positions (including negative ones, which previously caused
+// a slice-bounds panic) are ignored.
+func (c *CacheProcs) deleteItem(pos int) {
+	if pos < 0 || pos >= len(c.items) {
+		return
+	}
+	c.setItems(c.items[:pos], c.items[pos+1:])
+}
+
+// setItems rebuilds the cache list as newItems followed by oldItems.
+// Callers use it both to prepend fresh entries (add) and to splice an
+// element out of the list (deleteItem). Callers must hold the write lock.
+func (c *CacheProcs) setItems(newItems []*ProcItem, oldItems []*ProcItem) {
+	c.items = append(newItems, oldItems...)
+}
+
+// getItem returns the item at the given position of the list, or nil when
+// the index is out of range (negative indexes previously panicked).
+func (c *CacheProcs) getItem(index int) *ProcItem {
+	c.RLock()
+	defer c.RUnlock()
+
+	if index < 0 || index >= len(c.items) {
+		return nil
+	}
+
+	return c.items[index]
+}
+
+// getItems returns the list of cached processes.
+// Reading c.items without the lock (as before) races with add/delete/sort;
+// take the read lock like every other accessor of this cache.
+// NOTE: the returned slice shares the cache's backing array — callers must
+// not mutate it.
+func (c *CacheProcs) getItems() []*ProcItem {
+	c.RLock()
+	defer c.RUnlock()
+	return c.items
+}
+
+// countItems returns how many processes are held in the cache.
+func (c *CacheProcs) countItems() int {
+	c.RLock()
+	count := len(c.items)
+	c.RUnlock()
+	return count
+}
+
+// loop over the processes that have generated connections
+// getPid returns the pid owning the given inode plus its position in the
+// cache, or (-1, -1) when no cached process has a matching descriptor.
+func (c *CacheProcs) getPid(inode int, inodeKey string, expect string) (int, int) {
+	c.Lock()
+	defer c.Unlock()
+
+	for n, procItem := range c.items {
+		if procItem == nil {
+			continue
+		}
+
+		// fast path: the expected link is among the already known descriptors.
+		if idxDesc, _ := getPidDescriptorsFromCache(procItem.FdPath, inodeKey, expect, &procItem.Descriptors, procItem.Pid); idxDesc != -1 {
+			procItem.updateTime()
+			return procItem.Pid, n
+		}
+
+		// the process may have opened new descriptors since it was cached;
+		// re-read them from /proc.
+		descriptors := lookupPidDescriptors(procItem.FdPath, procItem.Pid)
+		if descriptors == nil {
+			// process gone. NOTE(review): deleteItem mutates c.items while
+			// this loop ranges over the original slice header, so `n` can be
+			// stale for later iterations — confirm this is acceptable.
+			c.deleteItem(n)
+			continue
+		}
+
+		procItem.updateDescriptors(descriptors)
+		if idxDesc, _ := getPidDescriptorsFromCache(procItem.FdPath, inodeKey, expect, &descriptors, procItem.Pid); idxDesc != -1 {
+			procItem.updateTime()
+			return procItem.Pid, n
+		}
+	}
+
+	return -1, -1
+}
+
+//******************************************************************************
+// cache of inodes
+
+// add caches an inode along with the path of the descriptor that points to
+// it. When no descriptor link is given, /proc/<pid>/exe is used so getPid()
+// can still check whether the process is alive.
+func (i *CacheInodes) add(key, descLink string, pid int) {
+	i.Lock()
+	defer i.Unlock()
+
+	if descLink == "" {
+		descLink = fmt.Sprint("/proc/", pid, "/exe")
+	}
+	newItem := &InodeItem{
+		FdPath:   descLink,
+		Pid:      pid,
+		LastSeen: time.Now().UnixNano(),
+	}
+	i.items[key] = newItem
+}
+
+// delete drops every cached inode that belongs to the given pid.
+func (i *CacheInodes) delete(pid int) {
+	i.Lock()
+	defer i.Unlock()
+
+	for key, it := range i.items {
+		if it.Pid == pid {
+			delete(i.items, key)
+		}
+	}
+}
+
+// getPid returns the pid cached for the given inode key, refreshing the
+// entry's timestamp. When the process behind the entry no longer exists,
+// the stale data is evicted from both caches and -1 is returned.
+func (i *CacheInodes) getPid(inodeKey string) int {
+	item, found := i.isInCache(inodeKey)
+	if !found {
+		return -1
+	}
+	// sometimes the process may have disappeared at this point
+	if _, err := os.Lstat(item.FdPath); err != nil {
+		pidsCache.delete(item.Pid)
+		i.delItem(inodeKey)
+		return -1
+	}
+	item.updateTime()
+	return item.Pid
+}
+
+// delItem removes a single inode entry from the cache.
+func (i *CacheInodes) delItem(inodeKey string) {
+	i.Lock()
+	defer i.Unlock()
+	delete(i.items, inodeKey)
+}
+
+// getItem returns the cached item for the given inode key, or nil.
+func (i *CacheInodes) getItem(inodeKey string) *InodeItem {
+	i.RLock()
+	item := i.items[inodeKey]
+	i.RUnlock()
+	return item
+}
+
+// getItems returns the whole map of cached inodes.
+// NOTE: the map itself is shared with the cache; callers must not mutate it.
+func (i *CacheInodes) getItems() map[string]*InodeItem {
+	i.RLock()
+	items := i.items
+	i.RUnlock()
+	return items
+}
+
+// isInCache reports whether the inode key is cached, returning the item.
+func (i *CacheInodes) isInCache(inodeKey string) (*InodeItem, bool) {
+	i.RLock()
+	defer i.RUnlock()
+
+	item, found := i.items[inodeKey]
+	return item, found
+}
+
+// cleanup evicts inodes whose backing /proc path no longer exists, or that
+// have been inactive for longer than maxTTL minutes.
+func (i *CacheInodes) cleanup() {
+	now := time.Now()
+	i.Lock()
+	defer i.Unlock()
+	for key, it := range i.items {
+		if it == nil {
+			continue
+		}
+		inactive := now.Sub(time.Unix(0, it.getTime()))
+		if !core.Exists(it.FdPath) || int(inactive.Minutes()) > maxTTL {
+			delete(i.items, key)
+		}
+	}
+}
+
+// getPidDescriptorsFromCache scans the given descriptors looking for one
+// whose symlink matches the expected target (e.g. "socket:[inode]").
+// On a hit the descriptor is moved to the front of the list, and the inode
+// cache entry is refreshed if it already exists. It returns the index where
+// the descriptor was found (-1 if not found) and the possibly reordered list.
+func getPidDescriptorsFromCache(fdPath, inodeKey, expect string, descriptors *[]string, pid int) (int, *[]string) {
+	for fdIdx := 0; fdIdx < len(*descriptors); fdIdx++ {
+		descLink := fmt.Sprint(fdPath, (*descriptors)[fdIdx])
+		if link, err := os.Readlink(descLink); err == nil && link == expect {
+			if fdIdx > 0 {
+				// reordering helps to reduce look up times by a factor of 10.
+				fd := (*descriptors)[fdIdx]
+				*descriptors = append((*descriptors)[:fdIdx], (*descriptors)[fdIdx+1:]...)
+				*descriptors = append([]string{fd}, *descriptors...)
+			}
+			// only refresh entries already present in the inodes cache;
+			// brand-new entries are added by the callers.
+			if _, ok := inodesCache.isInCache(inodeKey); ok {
+				inodesCache.add(inodeKey, descLink, pid)
+			}
+			return fdIdx, descriptors
+		}
+	}
+
+	return -1, descriptors
+}
diff --git a/daemon/procmon/cache_test.go b/daemon/procmon/cache_test.go
new file mode 100644
index 0000000..5a7cd17
--- /dev/null
+++ b/daemon/procmon/cache_test.go
@@ -0,0 +1,103 @@
+package procmon
+
+import (
+ "fmt"
+ "testing"
+ "time"
+)
+
+// TestCacheProcs exercises add/delete/lookup on both the procs and the
+// inodes caches.
+func TestCacheProcs(t *testing.T) {
+	fdList := []string{"0", "1", "2"}
+	pidsCache.add(fmt.Sprint("/proc/", myPid, "/fd/"), fdList, myPid)
+	t.Log("Pids in cache: ", pidsCache.countItems())
+
+	t.Run("Test addProcEntry", func(t *testing.T) {
+		if pidsCache.countItems() != 1 {
+			t.Error("pidsCache should be 1")
+		}
+	})
+
+	// getItem returns a pointer into the cache, so the timestamp must be
+	// copied BEFORE the next add(): reading it afterwards would observe the
+	// already-refreshed value, making the comparison below vacuous.
+	oldLastSeen := pidsCache.getItem(0).LastSeen
+	pidsCache.add(fmt.Sprint("/proc/", myPid, "/fd/"), fdList, myPid)
+	t.Run("Test addProcEntry update", func(t *testing.T) {
+		if pidsCache.countItems() != 1 {
+			t.Error("pidsCache should still be 1!", pidsCache)
+		}
+		oldTime := time.Unix(0, oldLastSeen)
+		newTime := time.Unix(0, pidsCache.getItem(0).LastSeen)
+		// re-adding an existing pid must refresh (never rewind) LastSeen.
+		if newTime.Before(oldTime) {
+			t.Error("pidsCache, time not updated: ", oldTime, newTime)
+		}
+	})
+
+	pidsCache.add("/proc/2/fd", fdList, 2)
+	pidsCache.delete(2)
+	t.Run("Test deleteProcEntry", func(t *testing.T) {
+		if pidsCache.countItems() != 1 {
+			t.Error("pidsCache should be 1:", pidsCache.countItems())
+		}
+	})
+
+	pid, _ := pidsCache.getPid(0, "", "/dev/null")
+	t.Run("Test getPidFromCache", func(t *testing.T) {
+		if pid != myPid {
+			t.Error("pid not found in cache", pidsCache.countItems())
+		}
+	})
+
+	// should not crash, and the number of items should still be 1
+	pidsCache.deleteItem(1)
+	t.Run("Test deleteItem check bounds", func(t *testing.T) {
+		if pidsCache.countItems() != 1 {
+			t.Error("deleteItem check bounds error", pidsCache.countItems())
+		}
+	})
+
+	pidsCache.deleteItem(0)
+	t.Run("Test deleteItem", func(t *testing.T) {
+		if pidsCache.countItems() != 0 {
+			t.Error("deleteItem error", pidsCache.countItems())
+		}
+	})
+	t.Log("items in cache:", pidsCache.countItems())
+
+	// the key of an inodeCache entry is formed as: inodeNumer + srcIP + srcPort + dstIP + dstPort
+	inodeKey := "000000000127.0.0.144444127.0.0.153"
+	// add() expects a path to the inode fd (/proc//fd/12345), but as getPid() will check the path in order to retrieve the pid,
+	// we just set it to "" and it'll use /proc//exe
+	inodesCache.add(inodeKey, "", myPid)
+	t.Run("Test addInodeEntry", func(t *testing.T) {
+		if _, found := inodesCache.items[inodeKey]; !found {
+			t.Error("inodesCache, inode not added:", len(inodesCache.items), inodesCache.items)
+		}
+	})
+
+	pid = inodesCache.getPid(inodeKey)
+	t.Run("Test getPidByInodeFromCache", func(t *testing.T) {
+		if pid != myPid {
+			t.Error("inode not found in cache", pid, inodeKey, len(inodesCache.items), inodesCache.items)
+		}
+	})
+
+	// should delete all inodes of a pid
+	inodesCache.delete(myPid)
+	t.Run("Test deleteInodeEntry", func(t *testing.T) {
+		if _, found := inodesCache.items[inodeKey]; found {
+			t.Error("inodesCache, key found in cache but it should not exist", inodeKey, len(inodesCache.items), inodesCache.items)
+		}
+	})
+}
+
+// Test getPidDescriptorsFromCache descriptors (inodes) reordering.
+// When an inode (descriptor) is found, if it's pushed to the top of the list,
+// the next time we look for it will cost -10x.
+// Without reordering, the inode 0 will always be found on the 10th position,
+// taking an average of 100us instead of 30.
+// Benchmark results with reordering: ~5600ns/op, without: ~56000ns/op.
+func BenchmarkGetPid(b *testing.B) {
+	// fd "0" is placed last on purpose, so the first lookup walks the
+	// whole list and the reordering optimization can kick in.
+	fdList := []string{"10", "9", "8", "7", "6", "5", "4", "3", "2", "1", "0"}
+	pidsCache.add(fmt.Sprint("/proc/", myPid, "/fd/"), fdList, myPid)
+	for i := 0; i < b.N; i++ {
+		pidsCache.getPid(0, "", "/dev/null")
+	}
+}
diff --git a/daemon/procmon/details.go b/daemon/procmon/details.go
new file mode 100644
index 0000000..a69abb3
--- /dev/null
+++ b/daemon/procmon/details.go
@@ -0,0 +1,197 @@
+package procmon
+
+import (
+ "bufio"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "regexp"
+ "strconv"
+ "strings"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/dns"
+ "github.com/evilsocket/opensnitch/daemon/netlink"
+)
+
+var socketsRegex, _ = regexp.Compile(`socket:\[([0-9]+)\]`)
+
+// GetInfo collects information of a process.
+// Resolving the executable path is mandatory: if readPath fails its error
+// is returned and nothing else is read. The remaining readers ignore their
+// own failures, so the Process may be only partially populated.
+func (p *Process) GetInfo() error {
+	if err := p.readPath(); err != nil {
+		return err
+	}
+	p.readCwd()
+	p.readCmdline()
+	p.readEnv()
+	p.readDescriptors()
+	p.readIOStats()
+	p.readStatus()
+	// strip the " (deleted)" suffix the kernel appends to the exe symlink
+	// when the binary was removed from disk.
+	p.cleanPath()
+
+	return nil
+}
+
+// setCwd sets the process' current working directory.
+func (p *Process) setCwd(cwd string) {
+	p.CWD = cwd
+}
+
+// readComm loads /proc/<pid>/comm (the kernel's short task name) into p.Comm.
+func (p *Process) readComm() error {
+	raw, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/comm", p.ID))
+	if err != nil {
+		return err
+	}
+	p.Comm = core.Trim(string(raw))
+	return nil
+}
+
+// readCwd resolves /proc/<pid>/cwd into p.CWD.
+func (p *Process) readCwd() error {
+	cwd, err := os.Readlink(fmt.Sprintf("/proc/%d/cwd", p.ID))
+	if err != nil {
+		return err
+	}
+	p.CWD = cwd
+	return nil
+}
+
+// read and parse environment variables of a process.
+// Entries in /proc/<pid>/environ are NUL-separated KEY=VALUE pairs.
+func (p *Process) readEnv() {
+	raw, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/environ", p.ID))
+	if err != nil {
+		return
+	}
+	for _, entry := range strings.Split(string(raw), "\x00") {
+		kv := strings.SplitN(core.Trim(entry), "=", 2)
+		if len(kv) != 2 {
+			continue
+		}
+		p.Env[core.Trim(kv[0])] = core.Trim(kv[1])
+	}
+}
+
+// readPath resolves the /proc/<pid>/exe symlink to find the process path.
+// An error is returned only when the link itself cannot be stat'ed; a
+// failing Readlink leaves p.Path untouched and still returns nil.
+func (p *Process) readPath() error {
+	linkName := fmt.Sprint("/proc/", p.ID, "/exe")
+	if _, err := os.Lstat(linkName); err != nil {
+		return err
+	}
+
+	link, err := os.Readlink(linkName)
+	if err == nil {
+		p.Path = link
+	}
+	return nil
+}
+
+// readCmdline parses /proc/<pid>/cmdline into p.Args.
+// Arguments in cmdline are NUL-separated, so split on the NUL byte
+// directly: the previous approach (replacing NULs with spaces and then
+// splitting on spaces) incorrectly broke up arguments that contain
+// embedded spaces.
+func (p *Process) readCmdline() {
+	data, err := ioutil.ReadFile(fmt.Sprintf("/proc/%d/cmdline", p.ID))
+	if err != nil || len(data) == 0 {
+		return
+	}
+
+	p.Args = make([]string, 0)
+	for _, arg := range strings.Split(string(data), "\x00") {
+		arg = core.Trim(arg)
+		if arg != "" {
+			p.Args = append(p.Args, arg)
+		}
+	}
+}
+
+// readDescriptors rebuilds p.Descriptors from /proc/<pid>/fd/.
+// For descriptors that are sockets, the raw "socket:[inode]" symlink text is
+// replaced with a human-readable connection summary obtained via netlink.
+func (p *Process) readDescriptors() {
+	f, err := os.Open(fmt.Sprint("/proc/", p.ID, "/fd/"))
+	if err != nil {
+		return
+	}
+	// NOTE(review): the Readdir error is ignored; fDesc may be nil or
+	// partial if the process exits while it's being read.
+	fDesc, err := f.Readdir(-1)
+	f.Close()
+	p.Descriptors = nil
+
+	for _, fd := range fDesc {
+		tempFd := &procDescriptors{
+			Name: fd.Name(),
+		}
+		if link, err := os.Readlink(fmt.Sprint("/proc/", p.ID, "/fd/", fd.Name())); err == nil {
+			tempFd.SymLink = link
+			socket := socketsRegex.FindStringSubmatch(link)
+			if len(socket) > 0 {
+				socketInfo, err := netlink.GetSocketInfoByInode(socket[1])
+				if err == nil {
+					tempFd.SymLink = fmt.Sprintf("socket:[%s] - %d:%s -> %s:%d, state: %s", fd.Name(),
+						socketInfo.ID.SourcePort,
+						socketInfo.ID.Source.String(),
+						dns.HostOr(socketInfo.ID.Destination, socketInfo.ID.Destination.String()),
+						socketInfo.ID.DestinationPort,
+						netlink.TCPStatesMap[socketInfo.State])
+				}
+			}
+
+			// NOTE(review): this stats the link *target* (the resolved
+			// path), not the /proc/<pid>/fd entry — confirm that's intended.
+			if linkInfo, err := os.Lstat(link); err == nil {
+				tempFd.Size = linkInfo.Size()
+				tempFd.ModTime = linkInfo.ModTime()
+			}
+		}
+		p.Descriptors = append(p.Descriptors, tempFd)
+	}
+}
+
+// readIOStats parses /proc/<pid>/io into p.IOStats.
+// Unknown keys are ignored; parse errors leave the field at zero.
+func (p *Process) readIOStats() {
+	f, err := os.Open(fmt.Sprint("/proc/", p.ID, "/io"))
+	if err != nil {
+		return
+	}
+	defer f.Close()
+
+	p.IOStats = &procIOstats{}
+
+	scanner := bufio.NewScanner(f)
+	for scanner.Scan() {
+		s := strings.Split(scanner.Text(), " ")
+		if len(s) < 2 {
+			// a line without a value would make s[1] panic below.
+			continue
+		}
+		switch s[0] {
+		case "rchar:":
+			p.IOStats.RChar, _ = strconv.ParseInt(s[1], 10, 64)
+		case "wchar:":
+			p.IOStats.WChar, _ = strconv.ParseInt(s[1], 10, 64)
+		case "syscr:":
+			p.IOStats.SyscallRead, _ = strconv.ParseInt(s[1], 10, 64)
+		case "syscw:":
+			p.IOStats.SyscallWrite, _ = strconv.ParseInt(s[1], 10, 64)
+		case "read_bytes:":
+			p.IOStats.ReadBytes, _ = strconv.ParseInt(s[1], 10, 64)
+		case "write_bytes:":
+			p.IOStats.WriteBytes, _ = strconv.ParseInt(s[1], 10, 64)
+		}
+	}
+}
+
+// readStatus dumps several /proc/<pid>/ files into the Process struct:
+// status, stat, stack, maps, and the parsed statm counters.
+func (p *Process) readStatus() {
+	for _, src := range []struct {
+		file string
+		dst  *string
+	}{
+		{"status", &p.Status},
+		{"stat", &p.Stat},
+		{"stack", &p.Stack},
+		{"maps", &p.Maps},
+	} {
+		if data, err := ioutil.ReadFile(fmt.Sprint("/proc/", p.ID, "/", src.file)); err == nil {
+			*src.dst = string(data)
+		}
+	}
+	if data, err := ioutil.ReadFile(fmt.Sprint("/proc/", p.ID, "/statm")); err == nil {
+		p.Statm = &procStatm{}
+		fmt.Sscanf(string(data), "%d %d %d %d %d %d %d", &p.Statm.Size, &p.Statm.Resident, &p.Statm.Shared, &p.Statm.Text, &p.Statm.Lib, &p.Statm.Data, &p.Statm.Dt)
+	}
+}
+
+func (p *Process) cleanPath() {
+ pathLen := len(p.Path)
+ if pathLen >= 10 && p.Path[pathLen-10:] == " (deleted)" {
+ p.Path = p.Path[:len(p.Path)-10]
+ }
+}
diff --git a/daemon/procmon/ebpf/cache.go b/daemon/procmon/ebpf/cache.go
new file mode 100644
index 0000000..e408be4
--- /dev/null
+++ b/daemon/procmon/ebpf/cache.go
@@ -0,0 +1,118 @@
+package ebpf
+
+import (
+ "sync"
+ "time"
+)
+
+type ebpfCacheItem struct {
+ Key []byte
+ LastSeen int64
+ UID int
+ Pid int
+ Hits uint
+}
+
+type ebpfCacheType struct {
+ Items map[string]*ebpfCacheItem
+ sync.RWMutex
+}
+
+var (
+ maxTTL = 20 // Seconds
+ maxCacheItems = 5000
+ ebpfCache *ebpfCacheType
+ ebpfCacheTicker *time.Ticker
+)
+
+// NewEbpfCacheItem creates a new cache item with a single hit and the
+// current time as its last-seen timestamp.
+func NewEbpfCacheItem(key []byte, pid, uid int) *ebpfCacheItem {
+	item := &ebpfCacheItem{
+		Key:  key,
+		Hits: 1,
+		Pid:  pid,
+		UID:  uid,
+	}
+	item.LastSeen = time.Now().UnixNano()
+	return item
+}
+
+// isValid reports whether the item was seen less than maxTTL seconds ago.
+func (i *ebpfCacheItem) isValid() bool {
+	age := time.Since(time.Unix(0, i.LastSeen))
+	return int(age.Seconds()) < maxTTL
+}
+
+// NewEbpfCache creates a new cache store and (re)starts the ticker that
+// drives the expiration of old entries.
+func NewEbpfCache() *ebpfCacheType {
+	ebpfCacheTicker = time.NewTicker(1 * time.Minute)
+	cache := &ebpfCacheType{}
+	cache.Items = make(map[string]*ebpfCacheItem)
+	return cache
+}
+
+// addNewItem stores a fresh item under the given key, replacing any
+// previous entry.
+func (e *ebpfCacheType) addNewItem(key string, itemKey []byte, pid, uid int) {
+	e.Lock()
+	e.Items[key] = NewEbpfCacheItem(itemKey, pid, uid)
+	e.Unlock()
+}
+
+// isInCache looks up a connection key. A still-valid entry is refreshed
+// (hit count + timestamp); an expired one is evicted on the spot (lazy TTL
+// expiration). When the cache has grown past maxCacheItems, old items are
+// purged afterwards.
+func (e *ebpfCacheType) isInCache(key string) (item *ebpfCacheItem, found bool) {
+	// the length is read before taking the write lock, so it may be
+	// slightly stale; that's acceptable for a size-limit heuristic.
+	leng := e.Len()
+
+	e.Lock()
+	item, found = e.Items[key]
+	if found {
+		if item.isValid() {
+			e.update(key, item)
+		} else {
+			found = false
+			delete(e.Items, key)
+		}
+	}
+	e.Unlock()
+
+	if leng > maxCacheItems {
+		e.DeleteOldItems()
+	}
+	return
+}
+
+// update refreshes an item's hit count and last-seen timestamp.
+// Callers must hold e's write lock (see isInCache). The map re-store is a
+// no-op when item came from e.Items[key], since it's the same pointer.
+func (e *ebpfCacheType) update(key string, item *ebpfCacheItem) {
+	item.Hits++
+	item.LastSeen = time.Now().UnixNano()
+	e.Items[key] = item
+}
+
+// Len returns the number of entries currently held in the cache.
+func (e *ebpfCacheType) Len() int {
+	e.RLock()
+	n := len(e.Items)
+	e.RUnlock()
+	return n
+}
+
+// DeleteOldItems evicts expired entries, and when the cache has grown past
+// maxCacheItems it additionally drops (arbitrary) entries until the size is
+// back under the limit.
+func (e *ebpfCacheType) DeleteOldItems() {
+	length := e.Len()
+
+	e.Lock()
+	defer e.Unlock()
+
+	for k, item := range e.Items {
+		if length > maxCacheItems || !item.isValid() {
+			delete(e.Items, k)
+			// keep the counter in sync with the deletions. Without this,
+			// once over the limit the condition stayed true for every key
+			// and the whole cache was wiped instead of just the excess.
+			length--
+		}
+	}
+}
+
+// clear empties the cache and stops the expiration ticker.
+func (e *ebpfCacheType) clear() {
+	if e == nil {
+		return
+	}
+	// take the write lock: clear() (called from Stop) can otherwise race
+	// with isInCache/addNewItem while the monitor goroutines are still
+	// shutting down.
+	e.Lock()
+	for k := range e.Items {
+		delete(e.Items, k)
+	}
+	e.Unlock()
+
+	if ebpfCacheTicker != nil {
+		ebpfCacheTicker.Stop()
+	}
+}
diff --git a/daemon/procmon/ebpf/debug.go b/daemon/procmon/ebpf/debug.go
new file mode 100644
index 0000000..b423768
--- /dev/null
+++ b/daemon/procmon/ebpf/debug.go
@@ -0,0 +1,102 @@
+package ebpf
+
+import (
+ "fmt"
+ "os/exec"
+ "strconv"
+ "syscall"
+ "unsafe"
+
+ "github.com/evilsocket/opensnitch/daemon/log"
+ daemonNetlink "github.com/evilsocket/opensnitch/daemon/netlink"
+ elf "github.com/iovisor/gobpf/elf"
+)
+
+// print map contents. used only for debugging
+// Keys are 12 bytes for IPv4 maps and 36 bytes for IPv6 maps; values are
+// 24 bytes in both cases (see the key/value layouts in opensnitch.c).
+func dumpMap(bpfmap *elf.Map, isIPv6 bool) {
+	var lookupKey []byte
+	var nextKey []byte
+	var value []byte
+	if !isIPv6 {
+		lookupKey = make([]byte, 12)
+		nextKey = make([]byte, 12)
+		value = make([]byte, 24)
+	} else {
+		lookupKey = make([]byte, 36)
+		nextKey = make([]byte, 36)
+		value = make([]byte, 24)
+	}
+	firstrun := true
+	i := 0
+	for {
+		i++
+		ok, err := m.LookupNextElement(bpfmap, unsafe.Pointer(&lookupKey[0]),
+			unsafe.Pointer(&nextKey[0]), unsafe.Pointer(&value[0]))
+		if err != nil {
+			log.Error("eBPF LookupNextElement error: %v", err)
+			return
+		}
+		if firstrun {
+			// on first run lookupKey is a dummy, nothing to delete
+			firstrun = false
+			copy(lookupKey, nextKey)
+			continue
+		}
+		// NOTE(review): the pair is printed before the !ok check below, so
+		// the final iteration may print a value that was not updated —
+		// harmless for a debug dump, but worth confirming.
+		fmt.Println("key, value", lookupKey, value)
+
+		if !ok { //reached end of map
+			break
+		}
+		copy(lookupKey, nextKey)
+	}
+}
+
+//PrintEverything prints all the stats. used only for debugging
+// It shells out to bpftool to dump the raw eBPF maps to files named
+// dump<N>, then prints the already-established connections and a netlink
+// dump of all TCP/UDP sockets (v4 and v6).
+func PrintEverything() {
+	bash, _ := exec.LookPath("bash")
+	//get the number of the first map
+	out, err := exec.Command(bash, "-c", "bpftool map show | head -n 1 | cut -d ':' -f1").Output()
+	if err != nil {
+		fmt.Println("bpftool map dump name tcpMap ", err)
+	}
+	// NOTE(review): out[:len(out)-1] panics if bpftool produced no output;
+	// acceptable for a debug-only helper, but worth confirming.
+	i, _ := strconv.Atoi(string(out[:len(out)-1]))
+	fmt.Println("i is", i)
+
+	//dump all maps for analysis
+	for j := i; j < i+14; j++ {
+		_, _ = exec.Command(bash, "-c", "bpftool map dump id "+strconv.Itoa(j)+" > dump"+strconv.Itoa(j)).Output()
+	}
+
+	alreadyEstablished.RLock()
+	for sock1, v := range alreadyEstablished.TCP {
+		fmt.Println(*sock1, v)
+	}
+
+	fmt.Println("---------------------")
+	for sock1, v := range alreadyEstablished.TCPv6 {
+		fmt.Println(*sock1, v)
+	}
+	alreadyEstablished.RUnlock()
+
+	fmt.Println("---------------------")
+	sockets, _ := daemonNetlink.SocketsDump(syscall.AF_INET, syscall.IPPROTO_TCP)
+	for idx := range sockets {
+		fmt.Println("socket tcp: ", sockets[idx])
+	}
+	fmt.Println("---------------------")
+	sockets, _ = daemonNetlink.SocketsDump(syscall.AF_INET6, syscall.IPPROTO_TCP)
+	for idx := range sockets {
+		fmt.Println("socket tcp6: ", sockets[idx])
+	}
+	fmt.Println("---------------------")
+	sockets, _ = daemonNetlink.SocketsDump(syscall.AF_INET, syscall.IPPROTO_UDP)
+	for idx := range sockets {
+		fmt.Println("socket udp: ", sockets[idx])
+	}
+	fmt.Println("---------------------")
+	sockets, _ = daemonNetlink.SocketsDump(syscall.AF_INET6, syscall.IPPROTO_UDP)
+	for idx := range sockets {
+		fmt.Println("socket udp6: ", sockets[idx])
+	}
+
+}
diff --git a/daemon/procmon/ebpf/ebpf.go b/daemon/procmon/ebpf/ebpf.go
new file mode 100644
index 0000000..a04781d
--- /dev/null
+++ b/daemon/procmon/ebpf/ebpf.go
@@ -0,0 +1,188 @@
+package ebpf
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+ "sync"
+ "syscall"
+ "unsafe"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+ daemonNetlink "github.com/evilsocket/opensnitch/daemon/netlink"
+ "github.com/evilsocket/opensnitch/daemon/procmon"
+ elf "github.com/iovisor/gobpf/elf"
+)
+
+//contains pointers to ebpf maps for a given protocol (tcp/udp/v6)
+type ebpfMapsForProto struct {
+ counterMap *elf.Map
+ bpfmap *elf.Map
+}
+
+//Not in use, ~4usec faster lookup compared to m.LookupElement()
+
+//mimics union bpf_attr's anonymous struct used by BPF_MAP_*_ELEM commands
+//from /include/uapi/linux/bpf.h
+type bpf_lookup_elem_t struct {
+ map_fd uint64 //even though in bpf.h its type is __u32, we must make it 8 bytes long
+ //because "key" is of type __aligned_u64, i.e. "key" must be aligned on an 8-byte boundary
+ key uintptr
+ value uintptr
+}
+
+type alreadyEstablishedConns struct {
+ TCP map[*daemonNetlink.Socket]int
+ TCPv6 map[*daemonNetlink.Socket]int
+ sync.RWMutex
+}
+
+var (
+ m *elf.Module
+ lock = sync.RWMutex{}
+ mapSize = uint(12000)
+ ebpfMaps map[string]*ebpfMapsForProto
+ //connections which were established at the time when opensnitch started
+ alreadyEstablished = alreadyEstablishedConns{
+ TCP: make(map[*daemonNetlink.Socket]int),
+ TCPv6: make(map[*daemonNetlink.Socket]int),
+ }
+
+ //stop == true is a signal for all goroutines to stop
+ stop = false
+
+ // list of local addresses of this machine
+ localAddresses []net.IP
+
+ hostByteOrder binary.ByteOrder
+)
+
+//Start installs ebpf kprobes
+// It mounts debugfs, loads /etc/opensnitchd/opensnitch.o, enables its
+// kprobes, zeroes the per-protocol counters, builds the proto->map lookup
+// table, snapshots already-established connections and finally launches the
+// monitor goroutines. Returns a non-nil error if any step fails.
+func Start() error {
+
+	if err := mountDebugFS(); err != nil {
+		log.Error("ebpf.Start -> mount debugfs error. Report on github please: %s", err)
+		return err
+	}
+
+	m = elf.NewModule("/etc/opensnitchd/opensnitch.o")
+	if err := m.Load(nil); err != nil {
+		log.Error("eBPF Failed to load /etc/opensnitchd/opensnitch.o: %v", err)
+		return err
+	}
+
+	// if previous shutdown was unclean, then we must remove the dangling kprobe
+	// and install it again (close the module and load it again)
+	if err := m.EnableKprobes(0); err != nil {
+		m.Close()
+		if err := m.Load(nil); err != nil {
+			log.Error("eBPF failed to load /etc/opensnitchd/opensnitch.o (2): %v", err)
+			return err
+		}
+		if err := m.EnableKprobes(0); err != nil {
+			log.Error("eBPF error when enabling kprobes: %v", err)
+			return err
+		}
+	}
+
+	// init all connection counters to 0
+	zeroKey := make([]byte, 4)
+	zeroValue := make([]byte, 8)
+	for _, name := range []string{"tcpcounter", "tcpv6counter", "udpcounter", "udpv6counter"} {
+		err := m.UpdateElement(m.Map(name), unsafe.Pointer(&zeroKey[0]), unsafe.Pointer(&zeroValue[0]), 0)
+		if err != nil {
+			log.Error("eBPF could not init counters to zero: %v", err)
+			return err
+		}
+	}
+	ebpfCache = NewEbpfCache()
+
+	lock.Lock()
+	//determine host byte order
+	// write a known 16-bit value through a pointer and inspect the byte
+	// layout: {0xCD,0xAB} means little-endian, {0xAB,0xCD} big-endian.
+	buf := [2]byte{}
+	*(*uint16)(unsafe.Pointer(&buf[0])) = uint16(0xABCD)
+	switch buf {
+	case [2]byte{0xCD, 0xAB}:
+		hostByteOrder = binary.LittleEndian
+	case [2]byte{0xAB, 0xCD}:
+		hostByteOrder = binary.BigEndian
+	default:
+		log.Error("Could not determine host byte order.")
+	}
+	lock.Unlock()
+
+	// proto name -> (counter map, connections map); used by the lookup and
+	// cleanup code to pick the right eBPF map for a connection.
+	ebpfMaps = map[string]*ebpfMapsForProto{
+		"tcp": {
+			counterMap: m.Map("tcpcounter"),
+			bpfmap:     m.Map("tcpMap")},
+		"tcp6": {
+			counterMap: m.Map("tcpv6counter"),
+			bpfmap:     m.Map("tcpv6Map")},
+		"udp": {
+			counterMap: m.Map("udpcounter"),
+			bpfmap:     m.Map("udpMap")},
+		"udp6": {
+			counterMap: m.Map("udpv6counter"),
+			bpfmap:     m.Map("udpv6Map")},
+	}
+
+	saveEstablishedConnections(uint8(syscall.AF_INET))
+	if core.IPv6Enabled {
+		saveEstablishedConnections(uint8(syscall.AF_INET6))
+	}
+
+	go monitorCache()
+	go monitorMaps()
+	go monitorLocalAddresses()
+	go monitorAlreadyEstablished()
+	return nil
+}
+
+// saveEstablishedConnections dumps the TCP sockets of the given address
+// family via netlink and records their owning pid (resolved by inode) in
+// alreadyEstablished.TCP.
+// NOTE(review): only alreadyEstablished.TCP is populated here, even for
+// AF_INET6 — confirm whether TCPv6 entries are filled elsewhere.
+func saveEstablishedConnections(commDomain uint8) error {
+	// save already established connections
+	socketListTCP, err := daemonNetlink.SocketsDump(commDomain, uint8(syscall.IPPROTO_TCP))
+	if err != nil {
+		log.Debug("eBPF could not dump TCP (%d) sockets via netlink: %v", commDomain, err)
+		return err
+	}
+	for _, sock := range socketListTCP {
+		inode := int((*sock).INode)
+		pid := procmon.GetPIDFromINode(inode, fmt.Sprint(inode,
+			(*sock).ID.Source, (*sock).ID.SourcePort, (*sock).ID.Destination, (*sock).ID.DestinationPort))
+		alreadyEstablished.Lock()
+		alreadyEstablished.TCP[sock] = pid
+		alreadyEstablished.Unlock()
+	}
+
+	return nil
+}
+
+// Stop stops monitoring connections using kprobes
+// It raises the stop flag (checked by the monitor goroutines via
+// isStopped), closes the eBPF module and clears the cache.
+func Stop() {
+	lock.Lock()
+	stop = true
+	lock.Unlock()
+	if m != nil {
+		m.Close()
+	}
+	ebpfCache.clear()
+}
+
+// isStopped reports whether Stop() has been requested.
+func isStopped() bool {
+	lock.RLock()
+	stopped := stop
+	lock.RUnlock()
+	return stopped
+}
+
+//make bpf() syscall with bpf_lookup prepared by the caller
+// Returns the raw first return value of the syscall (0 on success).
+// NOTE(review): the numbers below are Linux/amd64 specific — 321 is
+// __NR_bpf on x86_64 and 24 is sizeof(bpf_lookup_elem_t) (one 8-byte
+// map_fd plus two 8-byte pointers). Confirm before building for other
+// architectures.
+func makeBpfSyscall(bpf_lookup *bpf_lookup_elem_t) uintptr {
+	BPF_MAP_LOOKUP_ELEM := 1 //cmd number
+	syscall_BPF := 321 //syscall number
+	sizeOfStruct := 24 //sizeof bpf_lookup_elem_t struct
+
+	r1, _, _ := syscall.Syscall(uintptr(syscall_BPF), uintptr(BPF_MAP_LOOKUP_ELEM),
+		uintptr(unsafe.Pointer(bpf_lookup)), uintptr(sizeOfStruct))
+	return r1
+}
diff --git a/daemon/procmon/ebpf/find.go b/daemon/procmon/ebpf/find.go
new file mode 100644
index 0000000..be30f30
--- /dev/null
+++ b/daemon/procmon/ebpf/find.go
@@ -0,0 +1,171 @@
+package ebpf
+
+import (
+ "encoding/binary"
+ "fmt"
+ "net"
+ "unsafe"
+
+ daemonNetlink "github.com/evilsocket/opensnitch/daemon/netlink"
+)
+
+// we need to manually remove old connections from a bpf map
+
+// GetPid looks up process pid in a bpf map. If not found there, then it searches
+// already-established TCP connections.
+// Returns (pid, uid, error). The sentinel (-100, -100, nil) marks an
+// in-kernel connection (socket owned by UID 0 per netlink); (-1, -1, nil)
+// means the connection is simply unknown.
+func GetPid(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (int, int, error) {
+	// hostByteOrder is set by Start(); without it the map keys can't be built.
+	if hostByteOrder == nil {
+		return -1, -1, fmt.Errorf("eBPF monitoring method not initialized yet")
+	}
+
+	if pid, uid := getPidFromEbpf(proto, srcPort, srcIP, dstIP, dstPort); pid != -1 {
+		return pid, uid, nil
+	}
+	//check if it comes from already established TCP
+	if proto == "tcp" || proto == "tcp6" {
+		if pid, uid, err := findInAlreadyEstablishedTCP(proto, srcPort, srcIP, dstIP, dstPort); err == nil {
+			return pid, uid, nil
+		}
+	}
+	//using netlink.GetSocketInfo to check if UID is 0 (in-kernel connection)
+	if uid, _ := daemonNetlink.GetSocketInfo(proto, srcIP, srcPort, dstIP, dstPort); uid == 0 {
+		return -100, -100, nil
+	}
+	if !findAddressInLocalAddresses(srcIP) {
+		// systemd-resolved sometimes makes a TCP Fast Open connection to a DNS server (8.8.8.8 on my machine)
+		// and we get a packet here with **source** (not detination!!!) IP 8.8.8.8
+		// Maybe it's an in-kernel response with spoofed IP because wireshark does not show neither
+		// resolved's TCP Fast Open packet, nor the response
+		// Until this is better understood, we simply do not allow this machine to make connections with
+		// arbitrary source IPs
+		return -1, -1, fmt.Errorf("eBPF packet with unknown source IP: %s", srcIP)
+	}
+	return -1, -1, nil
+}
+
+// getPidFromEbpf looks up a connection in bpf map and returns PID if found
+// the lookup keys and values are defined in opensnitch.c , e.g.
+//
+// struct tcp_key_t {
+// 	u16 sport;
+//  u32 daddr;
+//  u16 dport;
+//  u32 saddr;
+// }__attribute__((packed));
+
+// struct tcp_value_t{
+// 	u64 pid;
+//  u64 uid;
+//  u64 counter;
+// }__attribute__((packed));;
+
+// Returns (-1, -1) when the connection is not in the map. Results are
+// memoized in ebpfCache keyed by proto+src+dst.
+func getPidFromEbpf(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (pid int, uid int) {
+	// not initialized yet (Start() hasn't completed).
+	if hostByteOrder == nil {
+		return -1, -1
+	}
+	// Some connections, like broadcasts, are only seen in eBPF once,
+	// but some applications send 1 connection per network interface.
+	// If we delete the eBPF entry the first time we see it, we won't find
+	// the connection the next times.
+	delItemIfFound := true
+
+	var key []byte
+	var value []byte
+	var isIP4 bool = (proto == "tcp") || (proto == "udp") || (proto == "udplite")
+
+	// build the lookup key following the packed layouts above: sport in
+	// host byte order, dport in network (big-endian) byte order.
+	if isIP4 {
+		key = make([]byte, 12)
+		value = make([]byte, 24)
+		copy(key[2:6], dstIP)
+		binary.BigEndian.PutUint16(key[6:8], uint16(dstPort))
+		copy(key[8:12], srcIP)
+	} else { // IPv6
+		key = make([]byte, 36)
+		value = make([]byte, 24)
+		copy(key[2:18], dstIP)
+		binary.BigEndian.PutUint16(key[18:20], uint16(dstPort))
+		copy(key[20:36], srcIP)
+	}
+	hostByteOrder.PutUint16(key[0:2], uint16(srcPort))
+
+	k := fmt.Sprint(proto, srcPort, srcIP.String(), dstIP.String(), dstPort)
+	cacheItem, isInCache := ebpfCache.isInCache(k)
+	if isInCache {
+		// still drop the kernel-side entry so the map doesn't fill up.
+		deleteEbpfEntry(proto, unsafe.Pointer(&key[0]))
+		return cacheItem.Pid, cacheItem.UID
+	}
+
+	err := m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value[0]))
+	if err != nil {
+		// key not found
+		// sometimes srcIP is 0.0.0.0. Happens especially with UDP sendto()
+		// for example: 57621:10.0.3.1 -> 10.0.3.255:57621 , reported as: 0.0.0.0 -> 10.0.3.255
+		if isIP4 {
+			zeroes := make([]byte, 4)
+			copy(key[8:12], zeroes)
+		} else {
+			zeroes := make([]byte, 16)
+			copy(key[20:36], zeroes)
+		}
+		err = m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value[0]))
+		if err == nil {
+			delItemIfFound = false
+		}
+	}
+	if err != nil && proto == "udp" && srcIP.String() == dstIP.String() {
+		// very rarely I see this connection. It has srcIP and dstIP == 0.0.0.0 in ebpf map
+		// it is a localhost to localhost connection
+		// srcIP was already set to 0, set dstIP to zero also
+		// TODO try to reproduce it and look for srcIP/dstIP in other kernel structures
+		zeroes := make([]byte, 4)
+		copy(key[2:6], zeroes)
+		err = m.LookupElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&key[0]), unsafe.Pointer(&value[0]))
+	}
+
+	if err != nil {
+		// key not found in bpf maps
+		return -1, -1
+	}
+	// value layout (tcp_value_t): pid at offset 0, uid at offset 8 — only
+	// the low 4 bytes of each u64 are read here.
+	pid = int(hostByteOrder.Uint32(value[0:4]))
+	uid = int(hostByteOrder.Uint32(value[8:12]))
+
+	ebpfCache.addNewItem(k, key, pid, uid)
+	if delItemIfFound {
+		deleteEbpfEntry(proto, unsafe.Pointer(&key[0]))
+	}
+	return pid, uid
+}
+
+// findInAlreadyEstablishedTCP searches those TCP connections which were
+// already established at the time when opensnitch started.
+func findInAlreadyEstablishedTCP(proto string, srcPort uint, srcIP net.IP, dstIP net.IP, dstPort uint) (int, int, error) {
+	alreadyEstablished.RLock()
+	defer alreadyEstablished.RUnlock()
+
+	var candidates map[*daemonNetlink.Socket]int
+	switch proto {
+	case "tcp":
+		candidates = alreadyEstablished.TCP
+	case "tcp6":
+		candidates = alreadyEstablished.TCPv6
+	}
+
+	for sock, cachedPid := range candidates {
+		id := (*sock).ID
+		if id.SourcePort == uint16(srcPort) && id.Source.Equal(srcIP) &&
+			id.Destination.Equal(dstIP) && id.DestinationPort == uint16(dstPort) {
+			return cachedPid, int((*sock).UID), nil
+		}
+	}
+	return -1, -1, fmt.Errorf("eBPF inode not found")
+}
+
+//returns true if addr is in the list of this machine's addresses
+// localAddresses is only read here, so the read lock is sufficient: taking
+// the write lock (as before) needlessly serialized concurrent lookups
+// against monitorLocalAddresses().
+func findAddressInLocalAddresses(addr net.IP) bool {
+	lock.RLock()
+	defer lock.RUnlock()
+	for _, a := range localAddresses {
+		if addr.String() == a.String() {
+			return true
+		}
+	}
+	return false
+}
diff --git a/daemon/procmon/ebpf/monitor.go b/daemon/procmon/ebpf/monitor.go
new file mode 100644
index 0000000..e2898b4
--- /dev/null
+++ b/daemon/procmon/ebpf/monitor.go
@@ -0,0 +1,127 @@
+package ebpf
+
+import (
+ "syscall"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+ daemonNetlink "github.com/evilsocket/opensnitch/daemon/netlink"
+ "github.com/vishvananda/netlink"
+)
+
+// monitorMaps manually removes old connections from the bpf maps, because a
+// full bpf map does not allow any more insertions.
+func monitorMaps() {
+	for {
+		if isStopped() {
+			return
+		}
+		time.Sleep(5 * time.Second)
+		for name := range ebpfMaps {
+			// using a pointer to the map doesn't delete the items.
+			// bpftool still counts them.
+			isIPv6 := name == "tcp6" || name == "udp6"
+			items := getItems(name, isIPv6)
+			if items > 500 {
+				log.Debug("[ebpf] old items deleted: %d", deleteOldItems(name, isIPv6, items/2))
+			}
+		}
+	}
+}
+
+// monitorCache periodically evicts expired entries from ebpfCache, driven by
+// ebpfCacheTicker, until the module is stopped.
+// Idiom fix: ranging over the ticker channel replaces the redundant
+// single-case for/select loop (staticcheck S1000); behavior is unchanged.
+func monitorCache() {
+	for range ebpfCacheTicker.C {
+		if isStopped() {
+			return
+		}
+		ebpfCache.DeleteOldItems()
+	}
+}
+
+// monitorLocalAddresses maintains a list of this machine's local addresses,
+// refreshing it every second until the module is stopped.
+// TODO: use netlink.AddrSubscribeWithOptions()
+func monitorLocalAddresses() {
+	for {
+		addr, err := netlink.AddrList(nil, netlink.FAMILY_ALL)
+		if err != nil {
+			log.Error("eBPF error looking up this machine's addresses via netlink: %v", err)
+			// fix: sleep (and honor stop) before retrying; the original
+			// `continue` jumped straight back to AddrList, busy-looping
+			// netlink for as long as the error persisted.
+			time.Sleep(time.Second * 1)
+			if isStopped() {
+				return
+			}
+			continue
+		}
+		lock.Lock()
+		localAddresses = nil
+		for _, a := range addr {
+			localAddresses = append(localAddresses, a.IP)
+		}
+		lock.Unlock()
+		time.Sleep(time.Second * 1)
+		if isStopped() {
+			return
+		}
+	}
+}
+
+// monitorAlreadyEstablished makes sure that when an already-established connection is closed
+// it will be removed from alreadyEstablished. If we don't do this and keep the alreadyEstablished entry forever,
+// then after the genuine process quits, a malicious process may reuse PID-srcPort-srcIP-dstPort-dstIP
+func monitorAlreadyEstablished() {
+	for {
+		time.Sleep(time.Second * 1)
+		if isStopped() {
+			return
+		}
+		// fresh snapshot of the kernel's open TCP sockets
+		socketListTCP, err := daemonNetlink.SocketsDump(uint8(syscall.AF_INET), uint8(syscall.IPPROTO_TCP))
+		if err != nil {
+			log.Debug("eBPF error in dumping TCP sockets via netlink")
+			continue
+		}
+		alreadyEstablished.Lock()
+		// drop every entry whose socket no longer appears in the kernel dump
+		for aesock := range alreadyEstablished.TCP {
+			found := false
+			for _, sock := range socketListTCP {
+				if socketsAreEqual(aesock, sock) {
+					found = true
+					break
+				}
+			}
+			if !found {
+				delete(alreadyEstablished.TCP, aesock)
+			}
+		}
+		alreadyEstablished.Unlock()
+
+		if core.IPv6Enabled {
+			// same procedure for the IPv6 socket table
+			socketListTCPv6, err := daemonNetlink.SocketsDump(uint8(syscall.AF_INET6), uint8(syscall.IPPROTO_TCP))
+			if err != nil {
+				log.Debug("eBPF error in dumping TCPv6 sockets via netlink: %s", err)
+				continue
+			}
+			alreadyEstablished.Lock()
+			for aesock := range alreadyEstablished.TCPv6 {
+				found := false
+				for _, sock := range socketListTCPv6 {
+					if socketsAreEqual(aesock, sock) {
+						found = true
+						break
+					}
+				}
+				if !found {
+					delete(alreadyEstablished.TCPv6, aesock)
+				}
+			}
+			alreadyEstablished.Unlock()
+		}
+	}
+}
+
+// socketsAreEqual reports whether two netlink sockets refer to the same
+// connection. The inode is unique enough on its own; the remaining fields
+// are compared as a safety net.
+func socketsAreEqual(aSocket, bSocket *daemonNetlink.Socket) bool {
+	if aSocket.INode != bSocket.INode {
+		return false
+	}
+	return aSocket.UID == bSocket.UID &&
+		aSocket.ID.SourcePort == bSocket.ID.SourcePort &&
+		aSocket.ID.DestinationPort == bSocket.ID.DestinationPort &&
+		aSocket.ID.Source.Equal(bSocket.ID.Source) &&
+		aSocket.ID.Destination.Equal(bSocket.ID.Destination)
+}
diff --git a/daemon/procmon/ebpf/utils.go b/daemon/procmon/ebpf/utils.go
new file mode 100644
index 0000000..c83f895
--- /dev/null
+++ b/daemon/procmon/ebpf/utils.go
@@ -0,0 +1,124 @@
+package ebpf
+
+import (
+ "fmt"
+ "unsafe"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+// mountDebugFS mounts debugfs on /sys/kernel/debug when the kprobes tracing
+// pseudo-file is not already available, so that kprobes can be created.
+func mountDebugFS() error {
+	debugfsPath := "/sys/kernel/debug/"
+	kprobesPath := fmt.Sprint(debugfsPath, "tracing/kprobe_events")
+	if core.Exists(kprobesPath) {
+		// already mounted, nothing to do
+		return nil
+	}
+	if _, err := core.Exec("mount", []string{"-t", "debugfs", "none", debugfsPath}); err != nil {
+		log.Warning("eBPF debugfs error: %s", err)
+		return err
+	}
+	return nil
+}
+
+// deleteEbpfEntry removes a single key from the given protocol's eBPF map,
+// reporting whether the deletion succeeded.
+func deleteEbpfEntry(proto string, key unsafe.Pointer) bool {
+	return m.DeleteElement(ebpfMaps[proto].bpfmap, key) == nil
+}
+
+// getItems counts the active items in the given protocol's eBPF map by
+// walking it with LookupNextElement, deleting entries detected as duplicates
+// along the way. Keys are 12 bytes for IPv4 maps and 36 bytes for IPv6 ones;
+// values are 24 bytes.
+func getItems(proto string, isIPv6 bool) (items uint) {
+	isDup := make(map[string]uint8)
+	var lookupKey []byte
+	var nextKey []byte
+	var value []byte
+	if !isIPv6 {
+		lookupKey = make([]byte, 12)
+		nextKey = make([]byte, 12)
+	} else {
+		lookupKey = make([]byte, 36)
+		nextKey = make([]byte, 36)
+	}
+	value = make([]byte, 24)
+	firstrun := true
+
+	for {
+		ok, err := m.LookupNextElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&lookupKey[0]),
+			unsafe.Pointer(&nextKey[0]), unsafe.Pointer(&value[0]))
+		if !ok || err != nil { //reached end of map
+			log.Debug("[ebpf] %s map: %d active items", proto, items)
+			return
+		}
+		if firstrun {
+			// on first run lookupKey is a dummy, nothing to delete
+			firstrun = false
+			copy(lookupKey, nextKey)
+			continue
+		}
+		// NOTE(review): the counter is only incremented below, so this branch
+		// requires the same key to be seen 3+ times before deleting — confirm
+		// that is the intended duplicate threshold.
+		if counter, duped := isDup[string(lookupKey)]; duped && counter > 1 {
+			deleteEbpfEntry(proto, unsafe.Pointer(&lookupKey[0]))
+			continue
+		}
+		isDup[string(lookupKey)]++
+		copy(lookupKey, nextKey)
+		items++
+	}
+
+	// unreachable: the loop above only exits through the returns inside it
+	return items
+}
+
+// deleteOldItems deletes maps' elements in order to keep them below maximum capacity.
+// If ebpf maps are full they don't allow any more insertions, ending up losing events.
+// It walks the map with LookupNextElement and deletes up to maxToDelete
+// entries, returning how many were actually removed.
+func deleteOldItems(proto string, isIPv6 bool, maxToDelete uint) (deleted uint) {
+	isDup := make(map[string]uint8)
+	var lookupKey []byte
+	var nextKey []byte
+	var value []byte
+	// key sizes: 12 bytes for IPv4 maps, 36 for IPv6; values are 24 bytes
+	if !isIPv6 {
+		lookupKey = make([]byte, 12)
+		nextKey = make([]byte, 12)
+	} else {
+		lookupKey = make([]byte, 36)
+		nextKey = make([]byte, 36)
+	}
+	value = make([]byte, 24)
+	firstrun := true
+	i := uint(0)
+
+	for {
+		i++
+		if i > maxToDelete {
+			return
+		}
+		ok, err := m.LookupNextElement(ebpfMaps[proto].bpfmap, unsafe.Pointer(&lookupKey[0]),
+			unsafe.Pointer(&nextKey[0]), unsafe.Pointer(&value[0]))
+		if !ok || err != nil { //reached end of map
+			return
+		}
+		// NOTE(review): as in getItems, the counter is incremented further
+		// below, so this fires on the 3rd+ sighting of the same key.
+		if counter, duped := isDup[string(lookupKey)]; duped && counter > 1 {
+			if deleteEbpfEntry(proto, unsafe.Pointer(&lookupKey[0])) {
+				deleted++
+				copy(lookupKey, nextKey)
+				continue
+			}
+			// deletion failed: bail out rather than loop on the same key
+			return
+		}
+
+		if firstrun {
+			// on first run lookupKey is a dummy, nothing to delete
+			firstrun = false
+			copy(lookupKey, nextKey)
+			continue
+		}
+
+		if !deleteEbpfEntry(proto, unsafe.Pointer(&lookupKey[0])) {
+			return
+		}
+		deleted++
+		isDup[string(lookupKey)]++
+		copy(lookupKey, nextKey)
+	}
+
+	// unreachable: the loop above only exits through the returns inside it
+	return
+}
diff --git a/daemon/procmon/find.go b/daemon/procmon/find.go
new file mode 100644
index 0000000..8675b60
--- /dev/null
+++ b/daemon/procmon/find.go
@@ -0,0 +1,108 @@
+package procmon
+
+import (
+ "fmt"
+ "os"
+ "sort"
+ "strconv"
+)
+
+// sortPidsByTime orders the given entries newest-first, so recently modified
+// ones are inspected before older ones.
+func sortPidsByTime(fdList []os.FileInfo) []os.FileInfo {
+	sort.Slice(fdList, func(i, j int) bool {
+		return fdList[i].ModTime().After(fdList[j].ModTime())
+	})
+	return fdList
+}
+
+// inodeFound searches for the given inode in /proc/<pid>/fd/ by resolving
+// each descriptor's symlink and comparing it against the expected
+// "socket:[inode]" string.
+//
+// On a match, the inode and pid caches are updated and sorted.
+func inodeFound(pidsPath, expect, inodeKey string, inode, pid int) bool {
+	fdPath := fmt.Sprint(pidsPath, pid, "/fd/")
+	fdList := lookupPidDescriptors(fdPath, pid)
+	if fdList == nil {
+		return false
+	}
+
+	for _, fd := range fdList {
+		descLink := fmt.Sprint(fdPath, fd)
+		link, err := os.Readlink(descLink)
+		if err != nil || link != expect {
+			continue
+		}
+		inodesCache.add(inodeKey, descLink, pid)
+		pidsCache.add(fdPath, fdList, pid)
+		return true
+	}
+
+	return false
+}
+
+// lookupPidInProc walks the running PIDs looking for the one that holds the
+// given socket inode open, returning it or -1.
+// TODO: if the inode is not found, search again in the task/threads of every
+// PID (costly).
+func lookupPidInProc(pidsPath, expect, inodeKey string, inode int) int {
+	for _, pid := range getProcPids(pidsPath) {
+		if inodeFound(pidsPath, expect, inodeKey, inode, pid) {
+			return pid
+		}
+	}
+	return -1
+}
+
+// lookupPidDescriptors returns the descriptor names under /proc/<pid>/fd/,
+// sorted by modification time (newest first), or nil on error.
+// TODO: search in /proc/<pid>/task/<tid>/fd/ .
+func lookupPidDescriptors(fdPath string, pid int) []string {
+	fd, err := os.Open(fdPath)
+	if err != nil {
+		return nil
+	}
+	// This is where most of the time is wasted when looking for PIDs.
+	// Long-running processes like firefox/chrome tend to keep many descriptor
+	// references to files deleted from disk (" (deleted)") that remain in
+	// memory and are not sockets, forcing iteration over 300-700 items.
+	contents, err := fd.Readdir(-1)
+	fd.Close()
+	if err != nil {
+		return nil
+	}
+	contents = sortPidsByTime(contents)
+
+	names := make([]string, len(contents))
+	for i := range contents {
+		names[i] = contents[i].Name()
+	}
+	return names
+}
+
+// getProcPids returns the list of running PIDs found under pidsPath
+// (/proc or /proc/<pid>/task/), sorted by modification time (newest first).
+// Errors opening or reading the directory yield an empty list.
+func getProcPids(pidsPath string) (pidList []int) {
+	f, err := os.Open(pidsPath)
+	if err != nil {
+		return pidList
+	}
+	ls, err := f.Readdir(-1)
+	f.Close()
+	if err != nil {
+		return pidList
+	}
+	ls = sortPidsByTime(ls)
+
+	for _, entry := range ls {
+		// only numeric directories are PIDs
+		if !entry.IsDir() {
+			continue
+		}
+		if pid, err := strconv.Atoi(entry.Name()); err == nil {
+			// fix: append the pid directly; the original
+			// `append(pidList, []int{pid}...)` allocated a throwaway
+			// slice for every entry.
+			pidList = append(pidList, pid)
+		}
+	}
+
+	return pidList
+}
diff --git a/daemon/procmon/find_test.go b/daemon/procmon/find_test.go
new file mode 100644
index 0000000..9588de8
--- /dev/null
+++ b/daemon/procmon/find_test.go
@@ -0,0 +1,42 @@
+package procmon
+
+import (
+ "fmt"
+ "testing"
+)
+
+// TestGetProcPids checks that listing /proc yields at least one PID.
+func TestGetProcPids(t *testing.T) {
+	pids := getProcPids("/proc")
+
+	if len(pids) == 0 {
+		t.Error("getProcPids() should not be 0", pids)
+	}
+}
+
+// TestLookupPidDescriptors checks that our own /proc/<pid>/fd/ listing is not
+// empty (every process has at least stdin/stdout/stderr open).
+func TestLookupPidDescriptors(t *testing.T) {
+	pidsFd := lookupPidDescriptors(fmt.Sprint("/proc/", myPid, "/fd/"), myPid)
+	if len(pidsFd) == 0 {
+		// fix: the failure message named the wrong function (getProcPids)
+		t.Error("lookupPidDescriptors() should not be 0", pidsFd)
+	}
+}
+
+// TestLookupPidInProc checks that some PID can be resolved by symlink target.
+// NOTE(review): "expect" is /dev/null rather than a "socket:[inode]" string,
+// and myPid is passed as the inode argument — this matches any process with
+// /dev/null open; confirm this fixture is intentional.
+func TestLookupPidInProc(t *testing.T) {
+	// we expect that the inode 1 points to /dev/null
+	expect := "/dev/null"
+	foundPid := lookupPidInProc("/proc/", expect, "", myPid)
+	if foundPid == -1 {
+		t.Error("lookupPidInProc() should not return -1")
+	}
+}
+
+// BenchmarkGetProcs measures the cost of listing the running PIDs from /proc.
+func BenchmarkGetProcs(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		getProcPids("/proc")
+	}
+}
+
+// BenchmarkLookupPidDescriptors measures the cost of listing our own
+// /proc/<pid>/fd/ directory.
+func BenchmarkLookupPidDescriptors(b *testing.B) {
+	for i := 0; i < b.N; i++ {
+		lookupPidDescriptors(fmt.Sprint("/proc/", myPid, "/fd/"), myPid)
+	}
+}
diff --git a/daemon/procmon/monitor/init.go b/daemon/procmon/monitor/init.go
new file mode 100644
index 0000000..4bad752
--- /dev/null
+++ b/daemon/procmon/monitor/init.go
@@ -0,0 +1,79 @@
+package monitor
+
+import (
+ "net"
+
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/evilsocket/opensnitch/daemon/procmon"
+ "github.com/evilsocket/opensnitch/daemon/procmon/audit"
+ "github.com/evilsocket/opensnitch/daemon/procmon/ebpf"
+)
+
+var (
+	// cacheMonitorsRunning guards against launching the procmon cache
+	// goroutines more than once across monitor method reconfigurations.
+	cacheMonitorsRunning = false
+)
+
+// ReconfigureMonitorMethod configures a new method for parsing connections,
+// restoring the previous one when the new method fails to start.
+func ReconfigureMonitorMethod(newMonitorMethod string) error {
+	oldMethod := procmon.GetMonitorMethod()
+	if oldMethod == newMonitorMethod {
+		return nil
+	}
+
+	End()
+	procmon.SetMonitorMethod(newMonitorMethod)
+	// if the new monitor method fails to start, rollback the change and exit
+	// without saving the configuration. Otherwise we can end up with the wrong
+	// monitor method configured and saved to file.
+	if err := Init(); err != nil {
+		procmon.SetMonitorMethod(oldMethod)
+		return err
+	}
+
+	return nil
+}
+
+// End stops the currently configured monitor method.
+func End() {
+	switch {
+	case procmon.MethodIsAudit():
+		audit.Stop()
+	case procmon.MethodIsEbpf():
+		ebpf.Stop()
+	}
+}
+
+// Init starts parsing connections using the configured method, launching the
+// cache maintenance goroutines on first use. If the preferred method (ebpf or
+// audit) fails to start, it falls back to /proc and returns the start error.
+func Init() (err error) {
+	if !cacheMonitorsRunning {
+		go procmon.MonitorActivePids()
+		go procmon.CacheCleanerTask()
+		cacheMonitorsRunning = true
+	}
+
+	switch {
+	case procmon.MethodIsEbpf():
+		if err = ebpf.Start(); err == nil {
+			log.Info("Process monitor method ebpf")
+			return nil
+		}
+		// we need to stop this method even if it has failed to start, in order to clean up the kprobes
+		// It helps with the error "cannot write...kprobe_events: file exists".
+		ebpf.Stop()
+		log.Warning("error starting ebpf monitor method: %v", err)
+	case procmon.MethodIsAudit():
+		var auditConn net.Conn
+		if auditConn, err = audit.Start(); err == nil {
+			log.Info("Process monitor method audit")
+			go audit.Reader(auditConn, (chan<- audit.Event)(audit.EventChan))
+			return nil
+		}
+		log.Warning("error starting audit monitor method: %v", err)
+	}
+
+	// if any of the above methods have failed, fallback to proc
+	log.Info("Process monitor method /proc")
+	procmon.SetMonitorMethod(procmon.MethodProc)
+	return err
+}
diff --git a/daemon/procmon/parse.go b/daemon/procmon/parse.go
new file mode 100644
index 0000000..b0bb824
--- /dev/null
+++ b/daemon/procmon/parse.go
@@ -0,0 +1,134 @@
+package procmon
+
+import (
+ "fmt"
+ "os"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/evilsocket/opensnitch/daemon/procmon/audit"
+)
+
+// getPIDFromAuditEvents searches the audit event queue for a process — and
+// then for a parent process — holding the given socket inode, returning the
+// PID and the event's position in the queue, or (-1, -1) when not found.
+func getPIDFromAuditEvents(inode int, inodeKey string, expect string) (int, int) {
+	audit.Lock.RLock()
+	defer audit.Lock.RUnlock()
+
+	auditEvents := audit.GetEvents()
+	// first pass: the processes that created the sockets
+	for n, event := range auditEvents {
+		if inodeFound("/proc/", expect, inodeKey, inode, event.Pid) {
+			return event.Pid, n
+		}
+	}
+	// second pass: their parents
+	for n, event := range auditEvents {
+		if inodeFound("/proc/", expect, inodeKey, inode, event.PPid) {
+			return event.PPid, n
+		}
+	}
+	return -1, -1
+}
+
+// GetPIDFromINode tries to get the PID from a socket inode following these steps:
+// 1. Get the PID from the cache of Inodes.
+// 2. Get the PID from the cache of PIDs.
+// 3. Look for the PID using one of these methods:
+//    - audit: listening for socket creation from auditd.
+//    - proc: search /proc
+//
+// If the PID is not found by one of the 2 first methods, it'll try it using /proc.
+func GetPIDFromINode(inode int, inodeKey string) int {
+	found := -1
+	// inode <= 0 is not a valid socket inode
+	if inode <= 0 {
+		return found
+	}
+	start := time.Now()
+
+	// the symlink target as it appears under /proc/<pid>/fd/
+	expect := fmt.Sprintf("socket:[%d]", inode)
+	if cachedPidInode := inodesCache.getPid(inodeKey); cachedPidInode != -1 {
+		log.Debug("Inode found in cache: %v %v %v %v", time.Since(start), inodesCache.getPid(inodeKey), inode, inodeKey)
+		return cachedPidInode
+	}
+
+	cachedPid, pos := pidsCache.getPid(inode, inodeKey, expect)
+	if cachedPid != -1 {
+		log.Debug("Socket found in known pids %v, pid: %d, inode: %d, pos: %d, pids in cache: %d", time.Since(start), cachedPid, inode, pos, pidsCache.countItems())
+		// promote the hit and remember the inode for next time
+		pidsCache.sort(cachedPid)
+		inodesCache.add(inodeKey, "", cachedPid)
+		return cachedPid
+	}
+
+	if MethodIsAudit() {
+		if aPid, pos := getPIDFromAuditEvents(inode, inodeKey, expect); aPid != -1 {
+			log.Debug("PID found via audit events: %v, position: %d", time.Since(start), pos)
+			return aPid
+		}
+	}
+	// NOTE(review): found is always -1 at this point, so the /proc fallback
+	// always runs regardless of methodIsProc() — confirm whether intentional.
+	if found == -1 || methodIsProc() {
+		found = lookupPidInProc("/proc/", expect, inodeKey, inode)
+	}
+	log.Debug("new pid lookup took (%d): %v", found, time.Since(start))
+
+	return found
+}
+
+// FindProcess checks if a process exists given a PID.
+// If it exists in /proc, a new Process{} object is returned with the details
+// to identify a process (cmdline, name, environment variables, etc).
+// When interceptUnknown is set, unresolved PIDs (< 0) yield a placeholder
+// Process instead of nil.
+func FindProcess(pid int, interceptUnknown bool) *Process {
+	if interceptUnknown && pid < 0 {
+		return NewProcess(0, "")
+	}
+
+	// fast path: recently seen, still-running process
+	if proc := findProcessInActivePidsCache(uint64(pid)); proc != nil {
+		return proc
+	}
+
+	if MethodIsAudit() {
+		if aevent := audit.GetEventByPid(pid); aevent != nil {
+			audit.Lock.RLock()
+			proc := NewProcess(pid, aevent.ProcPath)
+			proc.readCmdline()
+			proc.setCwd(aevent.ProcDir)
+			audit.Lock.RUnlock()
+			// if the proc dir contains non alpha-numeric chars the field is empty
+			if proc.CWD == "" {
+				proc.readCwd()
+			}
+			proc.readEnv()
+			proc.cleanPath()
+
+			addToActivePidsCache(uint64(pid), proc)
+			return proc
+		}
+	}
+	// if the PID dir doesn't exist, the process may have exited or be a kernel connection
+	// XXX: can a kernel connection exist without an entry in ProcFS?
+	if core.Exists(fmt.Sprint("/proc/", pid)) == false {
+		log.Debug("PID can't be read /proc/ %d", pid)
+		return nil
+	}
+
+	linkName := fmt.Sprint("/proc/", pid, "/exe")
+	link, err := os.Readlink(linkName)
+	proc := NewProcess(pid, link)
+	proc.readCmdline()
+	proc.readCwd()
+	proc.readEnv()
+	proc.cleanPath()
+
+	// no cmdline: fall back to the kernel thread/process name from comm
+	if len(proc.Args) == 0 {
+		proc.readComm()
+		proc.Args = make([]string, 0)
+		proc.Args = append(proc.Args, proc.Comm)
+	}
+
+	// If the link to the binary can't be read, the PID may be of a kernel task
+	if err != nil || proc.Path == "" {
+		proc.Path = "Kernel connection"
+	}
+
+	addToActivePidsCache(uint64(pid), proc)
+	return proc
+}
diff --git a/daemon/procmon/process.go b/daemon/procmon/process.go
new file mode 100644
index 0000000..f12c396
--- /dev/null
+++ b/daemon/procmon/process.go
@@ -0,0 +1,112 @@
+package procmon
+
+import (
+ "sync"
+ "time"
+)
+
+var (
+	// cacheMonitorsRunning tracks whether the cache maintenance goroutines
+	// have been launched.
+	cacheMonitorsRunning = false
+	// lock guards monitorMethod.
+	lock = sync.RWMutex{}
+	// monitorMethod is the technique currently used to associate connections
+	// with processes; defaults to scanning /proc.
+	monitorMethod = MethodProc
+)
+
+// monitor method supported types
+const (
+	MethodProc  = "proc"
+	MethodAudit = "audit"
+	MethodEbpf  = "ebpf"
+)
+
+// procIOstats holds a process' I/O counters.
+// man 5 proc; man procfs
+type procIOstats struct {
+	RChar        int64
+	WChar        int64
+	SyscallRead  int64
+	SyscallWrite int64
+	ReadBytes    int64
+	WriteBytes   int64
+}
+
+// procDescriptors describes one file descriptor entry of a process.
+type procDescriptors struct {
+	Name    string
+	SymLink string
+	Size    int64
+	ModTime time.Time
+}
+
+// procStatm holds a process' memory usage figures (statm).
+type procStatm struct {
+	Size     int64
+	Resident int64
+	Shared   int64
+	Text     int64
+	Lib      int64
+	Data     int64 // data + stack
+	Dt       int
+}
+
+// Process holds the details of a process.
+type Process struct {
+	ID          int
+	Comm        string
+	Path        string
+	Args        []string
+	Env         map[string]string
+	CWD         string
+	Descriptors []*procDescriptors
+	IOStats     *procIOstats
+	Status      string
+	Stat        string
+	Statm       *procStatm
+	Stack       string
+	Maps        string
+}
+
+// NewProcess returns a Process initialized with the given pid and path, and
+// with empty (non-nil) Args and Env containers.
+func NewProcess(pid int, path string) *Process {
+	p := &Process{
+		ID:   pid,
+		Path: path,
+	}
+	p.Args = make([]string, 0)
+	p.Env = make(map[string]string)
+	return p
+}
+
+// SetMonitorMethod sets the method used for parsing connections.
+func SetMonitorMethod(newMonitorMethod string) {
+	lock.Lock()
+	monitorMethod = newMonitorMethod
+	lock.Unlock()
+}
+
+// GetMonitorMethod returns the method currently used for parsing connections.
+// (The previous doc comment was a copy-paste of SetMonitorMethod's.)
+func GetMonitorMethod() string {
+	// fix: take the read lock, consistent with MethodIsEbpf/MethodIsAudit;
+	// a write lock is not needed just to read monitorMethod.
+	lock.RLock()
+	defer lock.RUnlock()
+
+	return monitorMethod
+}
+
+// MethodIsEbpf returns true when the process monitor method is eBPF.
+func MethodIsEbpf() bool {
+	lock.RLock()
+	method := monitorMethod
+	lock.RUnlock()
+	return method == MethodEbpf
+}
+
+// MethodIsAudit returns true when the process monitor method is audit.
+// (The previous comment incorrectly said "eBPF".)
+func MethodIsAudit() bool {
+	lock.RLock()
+	defer lock.RUnlock()
+
+	return monitorMethod == MethodAudit
+}
+
+// methodIsProc returns true when the process monitor method is /proc.
+func methodIsProc() bool {
+	lock.RLock()
+	method := monitorMethod
+	lock.RUnlock()
+	return method == MethodProc
+}
diff --git a/daemon/procmon/process_test.go b/daemon/procmon/process_test.go
new file mode 100644
index 0000000..258ac44
--- /dev/null
+++ b/daemon/procmon/process_test.go
@@ -0,0 +1,135 @@
+package procmon
+
+import (
+ "os"
+ "testing"
+)
+
+var (
+	// myPid is this test binary's own PID; the tests inspect /proc/<myPid>/.
+	myPid = os.Getpid()
+	// proc is a shared fixture mutated by the tests below.
+	proc = NewProcess(myPid, "/fake/path")
+)
+
+// TestNewProcess checks that the constructor stores the given pid and path.
+func TestNewProcess(t *testing.T) {
+	if proc.ID != myPid {
+		t.Error("NewProcess PID not equal to ", myPid)
+	}
+	if proc.Path != "/fake/path" {
+		t.Error("NewProcess path not equal to /fake/path")
+	}
+}
+
+// TestProcPath checks that readPath() replaces the fake path with the real
+// binary path read from /proc.
+func TestProcPath(t *testing.T) {
+	if err := proc.readPath(); err != nil {
+		t.Error("Proc path error:", err)
+	}
+	if proc.Path == "/fake/path" {
+		t.Error("Proc path equal to /fake/path, should be different:", proc.Path)
+	}
+}
+
+// TestProcCwd checks reading the cwd from /proc and overriding it via setCwd.
+func TestProcCwd(t *testing.T) {
+	err := proc.readCwd()
+
+	if proc.CWD == "" {
+		t.Error("Proc readCwd() not read:", err)
+	}
+
+	proc.setCwd("/home")
+	if proc.CWD != "/home" {
+		t.Error("Proc setCwd() should be /home:", proc.CWD)
+	}
+}
+
+// TestProcCmdline checks that the process' command line args can be read.
+func TestProcCmdline(t *testing.T) {
+	proc.readCmdline()
+
+	if len(proc.Args) == 0 {
+		t.Error("Proc Args should not be empty:", proc.Args)
+	}
+}
+
+// TestProcDescriptors checks that the process' open descriptors can be read.
+func TestProcDescriptors(t *testing.T) {
+	proc.readDescriptors()
+
+	if len(proc.Descriptors) == 0 {
+		t.Error("Proc Descriptors should not be empty:", proc.Descriptors)
+	}
+}
+
+// TestProcEnv checks that the process' environment variables can be read.
+func TestProcEnv(t *testing.T) {
+	proc.readEnv()
+
+	if len(proc.Env) == 0 {
+		t.Error("Proc Env should not be empty:", proc.Env)
+	}
+}
+
+// TestProcIOStats checks that the basic I/O counters can be read from
+// /proc/<pid>/io. The Read/WriteBytes checks are disabled, presumably because
+// they can legitimately be 0 early in the process' life.
+func TestProcIOStats(t *testing.T) {
+	proc.readIOStats()
+
+	if proc.IOStats.RChar == 0 {
+		t.Error("Proc.IOStats.RChar should not be 0:", proc.IOStats)
+	}
+	if proc.IOStats.WChar == 0 {
+		t.Error("Proc.IOStats.WChar should not be 0:", proc.IOStats)
+	}
+	if proc.IOStats.SyscallRead == 0 {
+		t.Error("Proc.IOStats.SyscallRead should not be 0:", proc.IOStats)
+	}
+	if proc.IOStats.SyscallWrite == 0 {
+		t.Error("Proc.IOStats.SyscallWrite should not be 0:", proc.IOStats)
+	}
+	/*if proc.IOStats.ReadBytes == 0 {
+		t.Error("Proc.IOStats.ReadBytes should not be 0:", proc.IOStats)
+	}
+	if proc.IOStats.WriteBytes == 0 {
+		t.Error("Proc.IOStats.WriteBytes should not be 0:", proc.IOStats)
+	}*/
+}
+
+// TestProcStatus checks that the status/stat/maps/statm details of the
+// process can be read and hold plausible values.
+func TestProcStatus(t *testing.T) {
+	proc.readStatus()
+
+	if proc.Status == "" {
+		t.Error("Proc Status should not be empty:", proc)
+	}
+	if proc.Stat == "" {
+		t.Error("Proc Stat should not be empty:", proc)
+	}
+	/*if proc.Stack == "" {
+		t.Error("Proc Stack should not be empty:", proc)
+	}*/
+	if proc.Maps == "" {
+		t.Error("Proc Maps should not be empty:", proc)
+	}
+	if proc.Statm.Size == 0 {
+		t.Error("Proc Statm Size should not be 0:", proc.Statm)
+	}
+	if proc.Statm.Resident == 0 {
+		t.Error("Proc Statm Resident should not be 0:", proc.Statm)
+	}
+	if proc.Statm.Shared == 0 {
+		t.Error("Proc Statm Shared should not be 0:", proc.Statm)
+	}
+	if proc.Statm.Text == 0 {
+		t.Error("Proc Statm Text should not be 0:", proc.Statm)
+	}
+	if proc.Statm.Lib != 0 {
+		// fix: this branch fires when Lib is NOT 0, so the message said
+		// the opposite of the condition
+		t.Error("Proc Statm Lib should be 0:", proc.Statm)
+	}
+	if proc.Statm.Data == 0 {
+		t.Error("Proc Statm Data should not be 0:", proc.Statm)
+	}
+	if proc.Statm.Dt != 0 {
+		// fix: same inverted message as Lib above
+		t.Error("Proc Statm Dt should be 0:", proc.Statm)
+	}
+}
+
+// TestProcCleanPath checks that the " (deleted)" suffix is stripped from the
+// binary path.
+func TestProcCleanPath(t *testing.T) {
+	proc.Path = "/fake/path/binary (deleted)"
+	proc.cleanPath()
+	if proc.Path != "/fake/path/binary" {
+		t.Error("Proc cleanPath() not cleaned:", proc.Path)
+	}
+}
diff --git a/daemon/rule/loader.go b/daemon/rule/loader.go
new file mode 100644
index 0000000..378d8d4
--- /dev/null
+++ b/daemon/rule/loader.go
@@ -0,0 +1,418 @@
+package rule
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "path/filepath"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/conman"
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+
+ "github.com/fsnotify/fsnotify"
+)
+
+// Loader is the object that holds the rules loaded from disk, as well as the
+// rules watcher.
+type Loader struct {
+	sync.RWMutex
+	// path is the directory the rules were loaded from
+	path string
+	// rules, indexed by rule name
+	rules map[string]*Rule
+	// rulesKeys keeps the rule names alphabetically sorted, for a
+	// deterministic matching order in FindFirstMatch
+	rulesKeys []string
+	// watcher notifies about rule file changes when liveReload is enabled
+	watcher           *fsnotify.Watcher
+	liveReload        bool
+	liveReloadRunning bool
+}
+
+// NewLoader creates a rules loader, optionally watching the rules directory
+// for live changes. No rules are read until Load() is called.
+func NewLoader(liveReload bool) (*Loader, error) {
+	watcher, err := fsnotify.NewWatcher()
+	if err != nil {
+		return nil, err
+	}
+	l := &Loader{
+		rules:      make(map[string]*Rule),
+		watcher:    watcher,
+		liveReload: liveReload,
+	}
+	return l, nil
+}
+
+// NumRules returns the number of loaded rules.
+func (l *Loader) NumRules() (n int) {
+	l.RLock()
+	n = len(l.rules)
+	l.RUnlock()
+	return n
+}
+
+// GetAll returns the loaded rules.
+// NOTE(review): this returns the internal map itself rather than a copy, so
+// callers iterating it after the lock is released may race with concurrent
+// rule updates — confirm callers' usage.
+func (l *Loader) GetAll() map[string]*Rule {
+	l.RLock()
+	defer l.RUnlock()
+	return l.rules
+}
+
+// Load loads rules files from disk.
+// It returns an error when the path doesn't exist or can't be globbed;
+// individual malformed rule files are logged and skipped.
+func (l *Loader) Load(path string) error {
+	if core.Exists(path) == false {
+		return fmt.Errorf("Path '%s' does not exist", path)
+	}
+
+	expr := filepath.Join(path, "*.json")
+	matches, err := filepath.Glob(expr)
+	if err != nil {
+		return fmt.Errorf("Error globbing '%s': %s", expr, err)
+	}
+
+	l.path = path
+	if len(l.rules) == 0 {
+		l.rules = make(map[string]*Rule)
+	}
+
+	for _, fileName := range matches {
+		log.Debug("Reading rule from %s", fileName)
+
+		// a bad rule file must not prevent loading the rest
+		if err := l.loadRule(fileName); err != nil {
+			log.Warning("%s", err)
+			continue
+		}
+	}
+
+	// start watching the rules directory for changes, only once
+	if l.liveReload && l.liveReloadRunning == false {
+		go l.liveReloadWorker()
+	}
+
+	return nil
+}
+
+// loadRule reads a single rule from disk, compiles its operator(s) and
+// registers it, cleaning up any previous rule with the same name.
+func (l *Loader) loadRule(fileName string) error {
+	raw, err := ioutil.ReadFile(fileName)
+	if err != nil {
+		return fmt.Errorf("Error while reading %s: %s", fileName, err)
+	}
+	l.Lock()
+	defer l.Unlock()
+
+	var r Rule
+	if err = json.Unmarshal(raw, &r); err != nil {
+		return fmt.Errorf("Error parsing rule from %s: %s", fileName, err)
+	}
+
+	// stop the domain-list monitors of the rule being replaced, if any
+	if oldRule, found := l.rules[r.Name]; found {
+		l.cleanListsRule(oldRule)
+	}
+
+	if r.Enabled {
+		if err := r.Operator.Compile(); err != nil {
+			log.Warning("Operator.Compile() error: %s: %s", err, r.Operator.Data)
+			return fmt.Errorf("(1) Error compiling rule: %s", err)
+		}
+		if r.Operator.Type == List {
+			for i := 0; i < len(r.Operator.List); i++ {
+				if err := r.Operator.List[i].Compile(); err != nil {
+					log.Warning("Operator.Compile() error: %s: ", err)
+					return fmt.Errorf("(1) Error compiling list rule: %s", err)
+				}
+			}
+		}
+	}
+	// the Duration may have changed from Always to temporary: drop the file
+	if oldRule, found := l.rules[r.Name]; found {
+		l.deleteOldRuleFromDisk(oldRule, &r)
+	}
+
+	log.Debug("Loaded rule from %s: %s", fileName, r.String())
+	l.rules[r.Name] = &r
+	l.sortRules()
+
+	if l.isTemporary(&r) {
+		// fix: propagate the scheduling error (e.g. an unparsable Duration);
+		// the original assigned it and then unconditionally returned nil.
+		return l.scheduleTemporaryRule(r)
+	}
+
+	return nil
+}
+
+// deleteRule deletes a rule from memory if it has been deleted from disk.
+// This is only called when fsnotify's Remove event fires, thus it doesn't
+// have to delete temporary rules (!Always).
+func (l *Loader) deleteRule(filePath string) {
+	fileName := filepath.Base(filePath)
+	// fix: strip the extension explicitly instead of slicing off the last 5
+	// bytes, which silently corrupted the name for non-".json" paths.
+	ruleName := strings.TrimSuffix(fileName, ".json")
+
+	l.RLock()
+	rule, found := l.rules[ruleName]
+	delRule := found && rule.Duration == Always
+	l.RUnlock()
+	if delRule {
+		l.Delete(ruleName)
+	}
+}
+
+// deleteRuleFromDisk removes the rule's backing json file.
+func (l *Loader) deleteRuleFromDisk(ruleName string) error {
+	// fix: build the path with filepath.Join, consistent with Add/Replace,
+	// instead of string concatenation with a hard-coded separator.
+	return os.Remove(filepath.Join(l.path, fmt.Sprintf("%s.json", ruleName)))
+}
+
+// deleteOldRuleFromDisk deletes a rule from disk when its Duration changes
+// from Always (persisted) to anything temporary (!Always).
+func (l *Loader) deleteOldRuleFromDisk(oldRule, newRule *Rule) {
+	if oldRule.Duration != Always || newRule.Duration == Always {
+		return
+	}
+	if err := l.deleteRuleFromDisk(oldRule.Name); err != nil {
+		log.Error("Error deleting old rule from disk: %s", oldRule.Name)
+	}
+}
+
+// cleanListsRule stops the domain-lists monitors attached to an Operator of
+// type Lists, or to the first Lists entry inside an Operator of type List.
+func (l *Loader) cleanListsRule(oldRule *Rule) {
+	switch oldRule.Operator.Type {
+	case Lists:
+		oldRule.Operator.StopMonitoringLists()
+	case List:
+		for i := 0; i < len(oldRule.Operator.List); i++ {
+			if oldRule.Operator.List[i].Type == Lists {
+				oldRule.Operator.List[i].StopMonitoringLists()
+				break
+			}
+		}
+	}
+}
+
+// liveReloadWorker watches the rules directory and reloads/deletes rules as
+// their json files change on disk. It runs until the watcher channels break.
+func (l *Loader) liveReloadWorker() {
+	l.liveReloadRunning = true
+
+	log.Debug("Rules watcher started on path %s ...", l.path)
+	if err := l.watcher.Add(l.path); err != nil {
+		log.Error("Could not watch path: %s", err)
+		l.liveReloadRunning = false
+		return
+	}
+
+	for {
+		select {
+		case event := <-l.watcher.Events:
+			// a new rule json file has been created or updated
+			if event.Op&fsnotify.Write == fsnotify.Write {
+				if strings.HasSuffix(event.Name, ".json") {
+					log.Important("Ruleset changed due to %s, reloading ...", path.Base(event.Name))
+					if err := l.loadRule(event.Name); err != nil {
+						log.Warning("%s", err)
+					}
+				}
+			} else if event.Op&fsnotify.Remove == fsnotify.Remove {
+				if strings.HasSuffix(event.Name, ".json") {
+					log.Important("Rule deleted %s", path.Base(event.Name))
+					// we only need to delete from memory rules of type Always,
+					// because the Remove event is of a file, i.e.: Duration == Always
+					l.deleteRule(event.Name)
+				}
+			}
+		case err := <-l.watcher.Errors:
+			log.Error("File system watcher error: %s", err)
+		}
+	}
+}
+
+// isTemporary reports whether the rule only lives in memory for a while, i.e.
+// its Duration is a timeout rather than Always, Restart or Once.
+func (l *Loader) isTemporary(r *Rule) bool {
+	switch r.Duration {
+	case Restart, Always, Once:
+		return false
+	}
+	return true
+}
+
+// isUniqueName reports whether no loaded rule already uses the given name.
+func (l *Loader) isUniqueName(name string) bool {
+	if _, found := l.rules[name]; found {
+		return false
+	}
+	return true
+}
+
+// setUniqueName renames the rule to "<name>-N" (starting at N=2) until the
+// name no longer clashes with an already loaded rule.
+func (l *Loader) setUniqueName(rule *Rule) {
+	l.Lock()
+	defer l.Unlock()
+
+	base := rule.Name
+	for idx := 2; !l.isUniqueName(rule.Name); idx++ {
+		rule.Name = fmt.Sprintf("%s-%d", base, idx)
+	}
+}
+
+// sortRules rebuilds the alphabetically sorted list of rule names used by
+// FindFirstMatch for deterministic iteration.
+func (l *Loader) sortRules() {
+	keys := make([]string, 0, len(l.rules))
+	for name := range l.rules {
+		keys = append(keys, name)
+	}
+	sort.Strings(keys)
+	l.rulesKeys = keys
+}
+
+// addUserRule registers a rule answered by the user. Once rules only apply to
+// a single connection, so they are not stored.
+// NOTE(review): replaceUserRule's error is discarded here — confirm callers
+// (e.g. Add) don't need it.
+func (l *Loader) addUserRule(rule *Rule) {
+	if rule.Duration == Once {
+		return
+	}
+
+	l.setUniqueName(rule)
+	l.replaceUserRule(rule)
+}
+
+// replaceUserRule compiles and installs (or updates) a rule in memory,
+// cleaning up any previous rule with the same name, and schedules its
+// expiration when the Duration is temporary.
+func (l *Loader) replaceUserRule(rule *Rule) (err error) {
+	l.Lock()
+	oldRule, found := l.rules[rule.Name]
+	l.Unlock()
+
+	if found {
+		// If the rule has changed from Always (saved on disk) to !Always (temporary),
+		// we need to delete the rule from disk and keep it in memory.
+		l.deleteOldRuleFromDisk(oldRule, rule)
+
+		// delete loaded lists, if this is a rule of type Lists
+		l.cleanListsRule(oldRule)
+	}
+
+	if rule.Enabled {
+		if err := rule.Operator.Compile(); err != nil {
+			log.Warning("Operator.Compile() error: %s: %s", err, rule.Operator.Data)
+			return fmt.Errorf("(2) Error compiling rule: %s", err)
+		}
+
+		if rule.Operator.Type == List {
+			// TODO: use List protobuf object instead of un/marshalling to/from json
+			if err = json.Unmarshal([]byte(rule.Operator.Data), &rule.Operator.List); err != nil {
+				return fmt.Errorf("Error loading rule of type list: %s", err)
+			}
+
+			for i := 0; i < len(rule.Operator.List); i++ {
+				if err := rule.Operator.List[i].Compile(); err != nil {
+					log.Warning("Operator.Compile() error: %s: ", err)
+					return fmt.Errorf("(2) Error compiling list rule: %s", err)
+				}
+			}
+		}
+	}
+	l.Lock()
+	l.rules[rule.Name] = rule
+	l.sortRules()
+	l.Unlock()
+
+	// temporary rules are removed from memory once their Duration elapses
+	if l.isTemporary(rule) {
+		err = l.scheduleTemporaryRule(*rule)
+	}
+
+	return err
+}
+
+// scheduleTemporaryRule arms a timer that removes the rule from memory once
+// its Duration (parsed as a time.Duration, e.g. "5m") elapses, unless the
+// rule was meanwhile replaced with a different Duration.
+func (l *Loader) scheduleTemporaryRule(rule Rule) error {
+	tTime, err := time.ParseDuration(string(rule.Duration))
+	if err != nil {
+		return err
+	}
+
+	time.AfterFunc(tTime, func() {
+		l.Lock()
+		defer l.Unlock()
+
+		log.Info("Temporary rule expired: %s - %s", rule.Name, rule.Duration)
+		if newRule, found := l.rules[rule.Name]; found {
+			// the rule was updated while the timer was pending; keep it
+			if newRule.Duration != rule.Duration {
+				log.Debug("%s temporary rule expired, but has new Duration, old: %s, new: %s", rule.Name, rule.Duration, newRule.Duration)
+				return
+			}
+			delete(l.rules, rule.Name)
+			l.sortRules()
+		}
+	})
+	return nil
+}
+
+// Add adds a rule to the list of rules, and optionally saves it to disk.
+func (l *Loader) Add(rule *Rule, saveToDisk bool) error {
+	l.addUserRule(rule)
+	if !saveToDisk {
+		return nil
+	}
+	fileName := filepath.Join(l.path, fmt.Sprintf("%s.json", rule.Name))
+	return l.Save(rule, fileName)
+}
+
+// Replace adds or updates a rule, and optionally saves it to disk.
+func (l *Loader) Replace(rule *Rule, saveToDisk bool) error {
+	if err := l.replaceUserRule(rule); err != nil {
+		return err
+	}
+	if !saveToDisk {
+		return nil
+	}
+
+	l.Lock()
+	defer l.Unlock()
+	return l.Save(rule, filepath.Join(l.path, fmt.Sprintf("%s.json", rule.Name)))
+}
+
+// Save serializes a rule as indented json and writes it to path, stamping the
+// rule's Updated time first.
+func (l *Loader) Save(rule *Rule, path string) error {
+	rule.Updated = time.Now()
+	raw, err := json.MarshalIndent(rule, "", " ")
+	if err != nil {
+		return fmt.Errorf("Error while saving rule %s to %s: %s", rule, path, err)
+	}
+	if err = ioutil.WriteFile(path, raw, 0644); err != nil {
+		return fmt.Errorf("Error while saving rule %s to %s: %s", rule, path, err)
+	}
+	return nil
+}
+
+// Delete removes a rule from memory by name; when the rule is persisted
+// (Duration == Always) it is also deleted from disk.
+func (l *Loader) Delete(ruleName string) error {
+	l.Lock()
+	defer l.Unlock()
+
+	rule, found := l.rules[ruleName]
+	if !found || rule == nil {
+		return nil
+	}
+	l.cleanListsRule(rule)
+
+	delete(l.rules, ruleName)
+	l.sortRules()
+
+	if rule.Duration != Always {
+		return nil
+	}
+
+	log.Info("Delete() rule: %s", rule)
+	return l.deleteRuleFromDisk(ruleName)
+}
+
+// FindFirstMatch will try to match the connection against the existing rule
+// set, iterating rules in alphabetical order. A matching Deny/Reject or
+// Precedence rule wins immediately; otherwise the last matching rule is
+// returned, or nil when nothing matches.
+func (l *Loader) FindFirstMatch(con *conman.Connection) (match *Rule) {
+	l.RLock()
+	defer l.RUnlock()
+
+	for _, idx := range l.rulesKeys {
+		// idiom fixes: drop the redundant ", _" on the map read and the
+		// explicit comparisons against true/false.
+		rule := l.rules[idx]
+		if !rule.Enabled {
+			continue
+		}
+		if rule.Match(con) {
+			// We have a match.
+			// Save the rule in order to not ask the user to take action,
+			// and keep iterating until a Deny or a Priority rule appears.
+			match = rule
+			if rule.Action == Reject || rule.Action == Deny || rule.Precedence {
+				return rule
+			}
+		}
+	}
+
+	return match
+}
diff --git a/daemon/rule/loader_test.go b/daemon/rule/loader_test.go
new file mode 100644
index 0000000..29fa796
--- /dev/null
+++ b/daemon/rule/loader_test.go
@@ -0,0 +1,275 @@
+package rule
+
+import (
+ "io"
+ "math/rand"
+ "os"
+ "testing"
+ "time"
+)
+
+var tmpDir string
+
+// TestMain creates a scratch directory shared by the rule tests and
+// removes it once the whole test binary has finished.
+func TestMain(m *testing.M) {
+	tmpDir = "/tmp/ostest_" + randString()
+	os.Mkdir(tmpDir, 0777)
+	// os.Exit() does not run deferred calls, so the original
+	// `defer os.RemoveAll(tmpDir)` never fired; clean up explicitly.
+	code := m.Run()
+	os.RemoveAll(tmpDir)
+	os.Exit(code)
+}
+
+// TestRuleLoader exercises the loader end to end: loading fixtures from
+// testdata/, adding temporary in-memory rules, and verifying expiration,
+// ordering and matching behaviour via the helper functions below.
+func TestRuleLoader(t *testing.T) {
+	t.Parallel()
+	t.Log("Test rules loader")
+
+	var list []Operator
+	dur1s := Duration("1s")
+	dummyOper, _ := NewOperator(Simple, false, OpTrue, "", list)
+	dummyOper.Compile()
+	// one rule that expires after 1s, one that lives until daemon restart
+	inMem1sRule := Create("000-xxx-name", true, false, Allow, dur1s, dummyOper)
+	inMemUntilRestartRule := Create("000-aaa-name", true, false, Allow, Restart, dummyOper)
+
+	l, err := NewLoader(false)
+	if err != nil {
+		t.Fail()
+	}
+	if err = l.Load("/non/existent/path/"); err == nil {
+		t.Error("non existent path test: err should not be nil")
+	}
+
+	if err = l.Load("testdata/"); err != nil {
+		t.Error("Error loading test rules: ", err)
+	}
+
+	testNumRules(t, l, 2)
+
+	if err = l.Add(inMem1sRule, false); err != nil {
+		t.Error("Error adding temporary rule")
+	}
+	testNumRules(t, l, 3)
+
+	// test auto deletion of temporary rule
+	time.Sleep(time.Second * 2)
+	testNumRules(t, l, 2)
+
+	if err = l.Add(inMemUntilRestartRule, false); err != nil {
+		t.Error("Error adding temporary rule (2)")
+	}
+	testNumRules(t, l, 3)
+	testRulesOrder(t, l)
+	testSortRules(t, l)
+	testFindMatch(t, l)
+	testFindEnabled(t, l)
+	testDurationChange(t, l)
+}
+
+// TestRuleLoaderInvalidRegexp checks that rules containing invalid
+// regular expressions are rejected, both when loaded from disk and when
+// installed at runtime via replaceUserRule().
+func TestRuleLoaderInvalidRegexp(t *testing.T) {
+	t.Parallel()
+	t.Log("Test rules loader: invalid regexp")
+
+	l, err := NewLoader(true)
+	if err != nil {
+		t.Fail()
+	}
+	t.Run("loadRule() from disk test (simple)", func(t *testing.T) {
+		if err := l.loadRule("testdata/invalid-regexp.json"); err == nil {
+			t.Error("invalid regexp rule loaded: loadRule()")
+		}
+	})
+
+	t.Run("loadRule() from disk test (list)", func(t *testing.T) {
+		if err := l.loadRule("testdata/invalid-regexp-list.json"); err == nil {
+			t.Error("invalid regexp rule loaded: loadRule()")
+		}
+	})
+
+	var list []Operator
+	dur30m := Duration("30m")
+	// note the unbalanced "(" in the process.path regexp below
+	opListData := `[{"type": "regexp", "operand": "process.path", "sensitive": false, "data": "^(/di(rmngr)$"}, {"type": "simple", "operand": "dest.port", "data": "53", "sensitive": false}]`
+	invalidRegexpOp, _ := NewOperator(List, false, OpList, opListData, list)
+	invalidRegexpRule := Create("invalid-regexp", true, false, Allow, dur30m, invalidRegexpOp)
+
+	t.Run("replaceUserRule() test list", func(t *testing.T) {
+		if err := l.replaceUserRule(invalidRegexpRule); err == nil {
+			t.Error("invalid regexp rule loaded: replaceUserRule()")
+		}
+	})
+}
+
+// TestLiveReload verifies that the loader's file watcher picks up rules
+// copied into, and removed from, the watched directory at runtime.
+// The fixed sleeps give the fsnotify watcher time to fire.
+func TestLiveReload(t *testing.T) {
+	t.Parallel()
+	t.Log("Test rules loader with live reload")
+	l, err := NewLoader(true)
+	if err != nil {
+		t.Fail()
+	}
+	if err = Copy("testdata/000-allow-chrome.json", tmpDir+"/000-allow-chrome.json"); err != nil {
+		t.Error("Error copying rule into a temp dir")
+	}
+	if err = Copy("testdata/001-deny-chrome.json", tmpDir+"/001-deny-chrome.json"); err != nil {
+		t.Error("Error copying rule into a temp dir")
+	}
+	if err = l.Load(tmpDir); err != nil {
+		t.Error("Error loading test rules: ", err)
+	}
+	//wait for watcher to activate
+	time.Sleep(time.Second)
+	if err = Copy("testdata/live_reload/test-live-reload-remove.json", tmpDir+"/test-live-reload-remove.json"); err != nil {
+		t.Error("Error copying rules into temp dir")
+	}
+	if err = Copy("testdata/live_reload/test-live-reload-delete.json", tmpDir+"/test-live-reload-delete.json"); err != nil {
+		t.Error("Error copying rules into temp dir")
+	}
+	//wait for watcher to pick up the changes
+	time.Sleep(time.Second)
+	testNumRules(t, l, 4)
+	// removing the file directly and deleting via the API must both work
+	if err = os.Remove(tmpDir + "/test-live-reload-remove.json"); err != nil {
+		t.Error("Error Remove()ing file from temp dir")
+	}
+	if err = l.Delete("test-live-reload-delete"); err != nil {
+		t.Error("Error Delete()ing file from temp dir")
+	}
+	//wait for watcher to pick up the changes
+	time.Sleep(time.Second)
+	testNumRules(t, l, 2)
+}
+
+// randString returns a 10-character random alphabetic string, used to
+// build unique temporary directory names for the tests.
+func randString() string {
+	const letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
+	// Use a private source instead of re-seeding the global RNG on every
+	// call: rand.Seed is deprecated and clobbers shared state for any
+	// other user of math/rand in the test binary.
+	rng := rand.New(rand.NewSource(time.Now().UnixNano()))
+	b := make([]byte, 10)
+	for i := range b {
+		b[i] = letters[rng.Intn(len(letters))]
+	}
+	return string(b)
+}
+
+// Copy duplicates the file at src into dst, returning any I/O error.
+// The final Close() of the destination is checked so write-back errors
+// are not lost.
+func Copy(src, dst string) error {
+	srcFd, err := os.Open(src)
+	if err != nil {
+		return err
+	}
+	defer srcFd.Close()
+
+	dstFd, err := os.Create(dst)
+	if err != nil {
+		return err
+	}
+	defer dstFd.Close()
+
+	if _, err = io.Copy(dstFd, srcFd); err != nil {
+		return err
+	}
+	return dstFd.Close()
+}
+
+// testNumRules asserts that the loader currently holds exactly num rules.
+func testNumRules(t *testing.T, l *Loader, num int) {
+	if l.NumRules() != num {
+		// report expected AND actual; the old message always said "(2)"
+		// and only printed the expected count, hiding the real failure.
+		t.Error("rules number should be ", num, " but got: ", l.NumRules())
+	}
+}
+
+// testRulesOrder verifies that rulesKeys holds the loaded rule names in
+// lexicographic order after TestRuleLoader's fixtures and temp rule are in.
+func testRulesOrder(t *testing.T, l *Loader) {
+	if l.rulesKeys[0] != "000-aaa-name" {
+		t.Error("Rules not in order (0): ", l.rulesKeys)
+	}
+	if l.rulesKeys[1] != "000-allow-chrome" {
+		t.Error("Rules not in order (1): ", l.rulesKeys)
+	}
+	if l.rulesKeys[2] != "001-deny-chrome" {
+		t.Error("Rules not in order (2): ", l.rulesKeys)
+	}
+}
+
+// testSortRules deliberately swaps two keys out of order and checks that
+// sortRules() restores the sorted sequence.
+func testSortRules(t *testing.T, l *Loader) {
+	l.rulesKeys[1] = "001-deny-chrome"
+	l.rulesKeys[2] = "000-allow-chrome"
+	l.sortRules()
+	if l.rulesKeys[1] != "000-allow-chrome" {
+		t.Error("Rules not in order (1): ", l.rulesKeys)
+	}
+	if l.rulesKeys[2] != "001-deny-chrome" {
+		t.Error("Rules not in order (2): ", l.rulesKeys)
+	}
+}
+
+// testFindMatch points the shared test connection at the chrome binary
+// (which both chrome fixture rules target) and runs the priority/deny/
+// allow matching scenarios, restoring the connection afterwards.
+func testFindMatch(t *testing.T, l *Loader) {
+	conn.Process.Path = "/opt/google/chrome/chrome"
+
+	testFindPriorityMatch(t, l)
+	testFindDenyMatch(t, l)
+	testFindAllowMatch(t, l)
+
+	restoreConnection()
+}
+
+// testFindPriorityMatch checks that the rule flagged with Precedence
+// (000-allow-chrome) wins even though other rules also match.
+func testFindPriorityMatch(t *testing.T, l *Loader) {
+	match := l.FindFirstMatch(conn)
+	if match == nil {
+		t.Error("FindPriorityMatch didn't match")
+		// t.Error does not stop the test; dereferencing match below
+		// would panic the whole binary, so bail out here.
+		return
+	}
+	// test 000-allow-chrome, priority == true
+	if match.Name != "000-allow-chrome" {
+		t.Error("findPriorityMatch: priority rule failed: ", match)
+	}
+}
+
+// testFindDenyMatch drops the allow rule's precedence so that the deny
+// rule (001-deny-chrome) must win the match.
+func testFindDenyMatch(t *testing.T, l *Loader) {
+	l.rules["000-allow-chrome"].Precedence = false
+	// test 000-allow-chrome, priority == false
+	// 001-deny-chrome must match
+	match := l.FindFirstMatch(conn)
+	if match == nil {
+		t.Error("FindDenyMatch deny didn't match")
+		// avoid the nil dereference below when nothing matched
+		return
+	}
+	if match.Name != "001-deny-chrome" {
+		t.Error("findDenyMatch: deny rule failed: ", match)
+	}
+}
+
+// testFindAllowMatch turns the deny rule into an allow one: with no
+// precedence and no deny, the last matching rule (001-deny-chrome,
+// now Allow) is returned.
+func testFindAllowMatch(t *testing.T, l *Loader) {
+	l.rules["000-allow-chrome"].Precedence = false
+	l.rules["001-deny-chrome"].Action = Allow
+	// test 000-allow-chrome, priority == false
+	// 001-deny-chrome must match
+	match := l.FindFirstMatch(conn)
+	if match == nil {
+		t.Error("FindAllowMatch allow didn't match")
+		// avoid the nil dereference below when nothing matched
+		return
+	}
+	if match.Name != "001-deny-chrome" {
+		t.Error("findAllowMatch: allow rule failed: ", match)
+	}
+}
+
+// testFindEnabled verifies that disabled rules are skipped by
+// FindFirstMatch: with 001-deny-chrome disabled it must not be returned.
+func testFindEnabled(t *testing.T, l *Loader) {
+	l.rules["000-allow-chrome"].Precedence = false
+	l.rules["001-deny-chrome"].Action = Allow
+	l.rules["001-deny-chrome"].Enabled = false
+	// test 000-allow-chrome, priority == false
+	// 001-deny-chrome must match
+	match := l.FindFirstMatch(conn)
+	if match == nil {
+		t.Error("FindEnabledMatch, match nil")
+		// avoid the nil dereference below when nothing matched
+		return
+	}
+	if match.Name == "001-deny-chrome" {
+		t.Error("findEnabledMatch: deny rule shouldn't have matched: ", match)
+	}
+}
+
+// testDurationChange checks that changing the Duration of a temporary
+// rule doesn't delete the new one: the first replaceUserRule arms a 2s
+// expiration timer, the second extends it to 1h, and the old timer must
+// not fire and remove the rule.
+func testDurationChange(t *testing.T, l *Loader) {
+	l.rules["000-aaa-name"].Duration = "2s"
+	if err := l.replaceUserRule(l.rules["000-aaa-name"]); err != nil {
+		t.Error("testDurationChange, error replacing rule: ", err)
+	}
+	l.rules["000-aaa-name"].Duration = "1h"
+	if err := l.replaceUserRule(l.rules["000-aaa-name"]); err != nil {
+		t.Error("testDurationChange, error replacing rule: ", err)
+	}
+	// sleep past the first (2s) timer; the rule must still exist
+	time.Sleep(time.Second * 4)
+	if _, found := l.rules["000-aaa-name"]; !found {
+		t.Error("testDurationChange, error: rule has been deleted")
+	}
+}
diff --git a/daemon/rule/operator.go b/daemon/rule/operator.go
new file mode 100644
index 0000000..45cbc34
--- /dev/null
+++ b/daemon/rule/operator.go
@@ -0,0 +1,297 @@
+package rule
+
+import (
+ "fmt"
+ "net"
+ "reflect"
+ "regexp"
+ "strings"
+ "sync"
+
+ "github.com/evilsocket/opensnitch/daemon/conman"
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+// Type is the type of rule.
+// Every type has its own way of checking the user data against connections.
+type Type string
+
+// Sensitive defines if a rule is case-sensitive or not. By default no.
+type Sensitive bool
+
+// Operand is what we check on a connection.
+type Operand string
+
+// Available types
+const (
+	Simple  = Type("simple")
+	Regexp  = Type("regexp")
+	Complex = Type("complex") // for future use
+	List    = Type("list")
+	Network = Type("network")
+	Lists   = Type("lists")
+)
+
+// Available operands
+const (
+	OpTrue                = Operand("true")
+	OpProcessID           = Operand("process.id")
+	OpProcessPath         = Operand("process.path")
+	OpProcessCmd          = Operand("process.command")
+	OpProcessEnvPrefix    = Operand("process.env.")
+	// length of the "process.env." prefix, used to slice out the var name
+	OpProcessEnvPrefixLen = 12
+	OpUserID              = Operand("user.id")
+	OpDstIP               = Operand("dest.ip")
+	OpDstHost             = Operand("dest.host")
+	OpDstPort             = Operand("dest.port")
+	OpDstNetwork          = Operand("dest.network")
+	OpProto               = Operand("protocol")
+	OpList                = Operand("list")
+	OpDomainsLists        = Operand("lists.domains")
+	OpDomainsRegexpLists  = Operand("lists.domains_regexp")
+	OpIPLists             = Operand("lists.ips")
+	OpNetLists            = Operand("lists.nets")
+)
+
+// opCallback is the comparison function selected by Compile() for an operand.
+type opCallback func(value interface{}) bool
+
+// Operator represents what we want to filter of a connection, and how.
+// Only the exported fields are (de)serialized as JSON; the rest is
+// runtime state built by Compile()/loadLists().
+type Operator struct {
+	Type      Type       `json:"type"`
+	Operand   Operand    `json:"operand"`
+	Sensitive Sensitive  `json:"sensitive"`
+	Data      string     `json:"data"`
+	List      []Operator `json:"list"`
+
+	sync.RWMutex
+	cb      opCallback
+	re      *regexp.Regexp
+	netMask *net.IPNet
+	isCompiled bool
+	// lists maps list entries (domains/IPs/nets/regexps) to per-entry data
+	lists               map[string]interface{}
+	listsMonitorRunning bool
+	exitMonitorChan     chan (bool)
+}
+
+// NewOperator builds an Operator with the given matching parameters.
+// The operator still needs Compile() before it can match connections.
+func NewOperator(t Type, s Sensitive, o Operand, data string, list []Operator) (*Operator, error) {
+	return &Operator{
+		Type:      t,
+		Sensitive: s,
+		Operand:   o,
+		Data:      data,
+		List:      list,
+	}, nil
+}
+
+// Compile translates the operator type field to its callback counterpart,
+// pre-compiling regexps, parsing CIDR masks and starting list monitors as
+// needed. It is idempotent: compiling twice is a no-op.
+// NOTE: the operand-based branches (OpDomainsLists etc.) are checked
+// before the List/Network type branches, so their order is significant.
+func (o *Operator) Compile() error {
+	if o.isCompiled {
+		return nil
+	}
+	if o.Type == Simple {
+		o.cb = o.simpleCmp
+	} else if o.Type == Regexp {
+		o.cb = o.reCmp
+		if o.Sensitive == false {
+			// case-insensitive: lower the pattern here, and the candidate
+			// value in reCmp
+			o.Data = strings.ToLower(o.Data)
+		}
+		re, err := regexp.Compile(o.Data)
+		if err != nil {
+			return err
+		}
+		o.re = re
+	} else if o.Operand == OpDomainsLists {
+		if o.Data == "" {
+			return fmt.Errorf("Operand lists is empty, nothing to load: %s", o)
+		}
+		o.loadLists()
+		o.cb = o.domainsListCmp
+	} else if o.Operand == OpDomainsRegexpLists {
+		if o.Data == "" {
+			return fmt.Errorf("Operand regexp lists is empty, nothing to load: %s", o)
+		}
+		o.loadLists()
+		o.cb = o.reListCmp
+	} else if o.Operand == OpIPLists {
+		if o.Data == "" {
+			return fmt.Errorf("Operand ip lists is empty, nothing to load: %s", o)
+		}
+		o.loadLists()
+		o.cb = o.ipListCmp
+	} else if o.Operand == OpNetLists {
+		if o.Data == "" {
+			return fmt.Errorf("Operand net lists is empty, nothing to load: %s", o)
+		}
+		o.loadLists()
+		o.cb = o.ipNetCmp
+	} else if o.Type == List {
+		o.Operand = OpList
+	} else if o.Type == Network {
+		var err error
+		_, o.netMask, err = net.ParseCIDR(o.Data)
+		if err != nil {
+			return err
+		}
+		o.cb = o.cmpNetwork
+	}
+	log.Debug("Operator compiled: %s", o)
+	o.isCompiled = true
+
+	return nil
+}
+
+func (o *Operator) String() string {
+ how := "is"
+ if o.Type == Regexp {
+ how = "matches"
+ }
+ return fmt.Sprintf("%s %s '%s'", log.Bold(string(o.Operand)), how, log.Yellow(string(o.Data)))
+}
+
+// simpleCmp compares the given value against the operator's data,
+// honouring the case-sensitivity flag.
+func (o *Operator) simpleCmp(v interface{}) bool {
+	if o.Sensitive {
+		return v == o.Data
+	}
+	return strings.EqualFold(v.(string), o.Data)
+}
+
+// reCmp matches the (string) value against the pre-compiled regexp.
+// Non-string values never match and are logged.
+func (o *Operator) reCmp(v interface{}) bool {
+	if vt := reflect.ValueOf(v).Kind(); vt != reflect.String {
+		log.Warning("Operator.reCmp() bad interface type: %T", v)
+		return false
+	}
+	// a defined type with string kind passes the Kind check above but
+	// would panic on a bare `v.(string)`; use the comma-ok form.
+	s, ok := v.(string)
+	if !ok {
+		log.Warning("Operator.reCmp() bad interface type: %T", v)
+		return false
+	}
+	if o.Sensitive == false {
+		s = strings.ToLower(s)
+	}
+	return o.re.MatchString(s)
+}
+
+// cmpNetwork reports whether destIP (a net.IP) falls inside the CIDR
+// mask parsed by Compile() (e.g. 192.0.2.1/24, 2001:db8:a0b:12f0::1/32).
+func (o *Operator) cmpNetwork(destIP interface{}) bool {
+	// 192.0.2.1/24, 2001:db8:a0b:12f0::1/32
+	if o.netMask == nil {
+		// Compile() failed or was never called for this operator
+		log.Warning("cmpNetwork() NULL: %s", destIP)
+		return false
+	}
+	return o.netMask.Contains(destIP.(net.IP))
+}
+
+// domainsListCmp reports whether the destination host appears in any of
+// the operator's loaded domain lists.
+func (o *Operator) domainsListCmp(v interface{}) bool {
+	dstHost := v.(string)
+	if dstHost == "" {
+		return false
+	}
+	if !o.Sensitive {
+		dstHost = strings.ToLower(dstHost)
+	}
+	o.RLock()
+	defer o.RUnlock()
+
+	fileName, found := o.lists[dstHost]
+	if !found {
+		return false
+	}
+	log.Debug("%s: %s, %s", log.Red("domain list match"), dstHost, fileName)
+	return true
+}
+
+// ipListCmp reports whether the destination IP (as a string) appears in
+// any of the operator's loaded IP lists.
+func (o *Operator) ipListCmp(v interface{}) bool {
+	dstIP := v.(string)
+	if dstIP == "" {
+		return false
+	}
+	o.RLock()
+	defer o.RUnlock()
+
+	fileName, found := o.lists[dstIP]
+	if !found {
+		return false
+	}
+	log.Debug("%s: %s, %s", log.Red("IP list match"), dstIP, fileName.(string))
+	return true
+}
+
+// ipNetCmp reports whether the destination IP falls inside any of the
+// CIDR ranges loaded from the operator's net lists.
+func (o *Operator) ipNetCmp(dstIP interface{}) bool {
+	o.RLock()
+	defer o.RUnlock()
+
+	ip := dstIP.(net.IP)
+	for host, netMask := range o.lists {
+		if netMask.(*net.IPNet).Contains(ip) {
+			log.Debug("%s: %s, %s", log.Red("Net list match"), dstIP, host)
+			return true
+		}
+	}
+	return false
+}
+
+// reListCmp reports whether the destination host matches any of the
+// regular expressions loaded from the operator's regexp lists.
+func (o *Operator) reListCmp(v interface{}) bool {
+	dstHost := v.(string)
+	if dstHost == "" {
+		return false
+	}
+	if !o.Sensitive {
+		dstHost = strings.ToLower(dstHost)
+	}
+	o.RLock()
+	defer o.RUnlock()
+
+	for file, re := range o.lists {
+		if re.(*regexp.Regexp).MatchString(dstHost) {
+			log.Debug("%s: %s, %s", log.Red("Regexp list match"), dstHost, file)
+			return true
+		}
+	}
+	return false
+}
+
+// listMatch implements AND semantics over the operator's sub-list:
+// every sub-operator must match the connection.
+func (o *Operator) listMatch(con interface{}) bool {
+	for i := 0; i < len(o.List); i++ {
+		// return as soon as one sub-operator fails; the old accumulator
+		// (`res = res && ...`) kept looping after the first false for
+		// nothing, although && already skipped further Match calls.
+		if !o.List[i].Match(con.(*conman.Connection)) {
+			return false
+		}
+	}
+	return true
+}
+
+// Match tries to match parts of a connection with the given operator.
+// It selects the connection field named by the operand and feeds it to
+// the callback installed by Compile(); unknown operands never match.
+func (o *Operator) Match(con *conman.Connection) bool {
+
+	if o.Operand == OpTrue {
+		return true
+	} else if o.Operand == OpList {
+		// AND over the sub-operators
+		return o.listMatch(con)
+	} else if o.Operand == OpProcessPath {
+		return o.cb(con.Process.Path)
+	} else if o.Operand == OpProcessCmd {
+		return o.cb(strings.Join(con.Process.Args, " "))
+	} else if o.Operand == OpDstHost && con.DstHost != "" {
+		// note: an empty DstHost falls through and never matches
+		return o.cb(con.DstHost)
+	} else if o.Operand == OpDstIP {
+		return o.cb(con.DstIP.String())
+	} else if o.Operand == OpDstPort {
+		return o.cb(fmt.Sprintf("%d", con.DstPort))
+	} else if o.Operand == OpUserID {
+		return o.cb(fmt.Sprintf("%d", con.Entry.UserId))
+	} else if o.Operand == OpProcessID {
+		return o.cb(fmt.Sprint(con.Process.ID))
+	} else if o.Operand == OpDomainsLists {
+		return o.cb(con.DstHost)
+	} else if o.Operand == OpIPLists {
+		return o.cb(con.DstIP.String())
+	} else if o.Operand == OpDstNetwork {
+		// network callbacks receive the raw net.IP, not its string form
+		return o.cb(con.DstIP)
+	} else if o.Operand == OpNetLists {
+		return o.cb(con.DstIP)
+	} else if o.Operand == OpDomainsRegexpLists {
+		return o.cb(con.DstHost)
+	} else if o.Operand == OpProto {
+		return o.cb(con.Protocol)
+	} else if strings.HasPrefix(string(o.Operand), string(OpProcessEnvPrefix)) {
+		// "process.env.FOO" -> compare against the process env var FOO
+		envVarName := core.Trim(string(o.Operand[OpProcessEnvPrefixLen:]))
+		envVarValue, _ := con.Process.Env[envVarName]
+		return o.cb(envVarValue)
+	}
+
+	return false
+}
diff --git a/daemon/rule/operator_lists.go b/daemon/rule/operator_lists.go
new file mode 100644
index 0000000..dd05635
--- /dev/null
+++ b/daemon/rule/operator_lists.go
@@ -0,0 +1,263 @@
+package rule
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net"
+ "path/filepath"
+ "regexp"
+ "runtime/debug"
+ "strings"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+)
+
+// monitorLists polls the operator's list directory (o.Data) every 4
+// seconds, reloading all the lists whenever a file appears, disappears
+// or its modification time changes, until exitMonitorChan signals exit.
+func (o *Operator) monitorLists() {
+	log.Info("monitor lists started: %s", o.Data)
+
+	modTimes := make(map[string]time.Time)
+	totalFiles := 0
+	needReload := false
+	numFiles := 0
+
+	expr := filepath.Join(o.Data, "/*.*")
+	for {
+		select {
+		case <-o.exitMonitorChan:
+			goto Exit
+		default:
+			fileList, err := filepath.Glob(expr)
+			if err != nil {
+				log.Warning("Error reading directory of domains list: %s, %s", o.Data, err)
+				goto Exit
+			}
+			numFiles = 0
+
+			for _, filename := range fileList {
+				// ignore hidden files
+				name := filepath.Base(filename)
+				if name[:1] == "." {
+					delete(modTimes, filename)
+					continue
+				}
+				// an overwrite operation performs two tasks: truncate the file and save the new content,
+				// causing the file time to be modified twice.
+				modTime, err := core.GetFileModTime(filename)
+				if err != nil {
+					log.Debug("deleting saved mod time due to error reading the list, %s", filename)
+					delete(modTimes, filename)
+				} else if lastModTime, found := modTimes[filename]; found {
+					if lastModTime.Equal(modTime) == false {
+						log.Debug("list changed: %s, %s, %s", lastModTime, modTime, filename)
+						needReload = true
+					}
+				}
+				modTimes[filename] = modTime
+				numFiles++
+			}
+			fileList = nil
+
+			// a change in the file count also triggers a reload
+			if numFiles != totalFiles {
+				needReload = true
+			}
+			totalFiles = numFiles
+
+			if needReload {
+				// we can't reload a single list, because the domains of all lists are added to the same map.
+				// we could have the domains separated by lists/files, but then we'd need to iterate the map in order
+				// to match a domain. Reloading the lists shoud only occur once a day.
+				if err := o.readLists(); err != nil {
+					log.Warning("%s", err)
+				}
+				needReload = false
+			}
+			time.Sleep(4 * time.Second)
+		}
+	}
+
+Exit:
+	modTimes = nil
+	o.ClearLists()
+	log.Info("lists monitor stopped")
+}
+
+// ClearLists deletes all the entries of the operator's lists map and
+// nudges the runtime to return the freed memory to the OS (the lists can
+// hold tens of thousands of entries).
+func (o *Operator) ClearLists() {
+	o.Lock()
+	defer o.Unlock()
+
+	log.Info("clearing domains lists: %d - %s", len(o.lists), o.Data)
+	// delete-in-range loop keeps the map allocated for reuse
+	for k := range o.lists {
+		delete(o.lists, k)
+	}
+	debug.FreeOSMemory()
+}
+
+// StopMonitoringLists stops the monitoring-lists goroutine, if it is
+// running, and marks the operator as no longer monitored.
+func (o *Operator) StopMonitoringLists() {
+	if o.listsMonitorRunning {
+		// signal monitorLists() to exit before dropping the channel
+		o.exitMonitorChan <- true
+		o.exitMonitorChan = nil
+		o.listsMonitorRunning = false
+	}
+}
+
+// readDomainsList parses a hosts-file style blocklist (lines of the form
+// "0.0.0.0 domain" or "127.0.0.1 domain"), adding each domain to the
+// operator's lists map with the source file name as value.
+// It returns the number of duplicated entries found.
+func (o *Operator) readDomainsList(raw, fileName string) (dups uint64) {
+	log.Debug("Loading domains list: %s, size: %d", fileName, len(raw))
+	lines := strings.Split(string(raw), "\n")
+	for _, domain := range lines {
+		if len(domain) < 9 {
+			continue
+		}
+		// exclude not valid lines
+		if domain[:7] != "0.0.0.0" && domain[:9] != "127.0.0.1" {
+			continue
+		}
+		host := domain[8:]
+		// exclude localhost entries
+		if domain[:9] == "127.0.0.1" {
+			// guard the slice below: a line that is exactly "127.0.0.1"
+			// (9 chars) previously panicked on domain[10:]
+			if len(domain) < 10 {
+				continue
+			}
+			host = domain[10:]
+		}
+		if host == "local" || host == "localhost" || host == "localhost.localdomain" || host == "broadcasthost" {
+			continue
+		}
+
+		host = core.Trim(host)
+		if _, found := o.lists[host]; found {
+			dups++
+			continue
+		}
+		o.lists[host] = fileName
+	}
+	lines = nil
+	log.Info("%d domains loaded, %s", len(o.lists), fileName)
+
+	return dups
+}
+
+// readNetList loads a file of CIDR ranges (one per line, '#' comments
+// allowed) into the operator's lists map, keyed by the trimmed line with
+// the parsed *net.IPNet as value. Returns the number of duplicates.
+func (o *Operator) readNetList(raw, fileName string) (dups uint64) {
+	log.Debug("Loading nets list: %s, size: %d", fileName, len(raw))
+	entries := strings.Split(string(raw), "\n")
+	for _, entry := range entries {
+		if entry == "" || entry[0] == '#' {
+			continue
+		}
+		cidr := core.Trim(entry)
+		if _, dup := o.lists[cidr]; dup {
+			dups++
+			continue
+		}
+		_, netMask, err := net.ParseCIDR(cidr)
+		if err != nil {
+			log.Warning("Error parsing net from list: %s, (%s)", err, fileName)
+			continue
+		}
+		o.lists[cidr] = netMask
+	}
+	entries = nil
+	log.Info("%d nets loaded, %s", len(o.lists), fileName)
+
+	return dups
+}
+
+// readRegexpList loads a file of regular expressions (one per line, '#'
+// comments allowed), compiling each and storing it in the lists map
+// keyed by its trimmed text. Returns the number of duplicates.
+func (o *Operator) readRegexpList(raw, fileName string) (dups uint64) {
+	log.Debug("Loading regexp list: %s, size: %d", fileName, len(raw))
+	lines := strings.Split(string(raw), "\n")
+	for n, line := range lines {
+		if line == "" || line[0] == '#' {
+			continue
+		}
+		host := core.Trim(line)
+		if _, found := o.lists[host]; found {
+			dups++
+			continue
+		}
+		// compile and store the trimmed expression: the old code checked
+		// duplicates on the trimmed text but compiled/stored the raw line,
+		// so entries with surrounding whitespace bypassed dedup.
+		re, err := regexp.Compile(host)
+		if err != nil {
+			log.Warning("Error compiling regexp from list: %s, (%d:%s)", err, n, fileName)
+			continue
+		}
+		o.lists[host] = re
+	}
+	lines = nil
+	log.Info("%d regexps loaded, %s", len(o.lists), fileName)
+
+	return dups
+}
+
+// readIPList loads a file of IP addresses (one per line, '#' comments
+// allowed) into the operator's lists map, with the source file name as
+// value. Returns the number of duplicates.
+func (o *Operator) readIPList(raw, fileName string) (dups uint64) {
+	log.Debug("Loading IPs list: %s, size: %d", fileName, len(raw))
+	entries := strings.Split(string(raw), "\n")
+	for _, entry := range entries {
+		if entry == "" || entry[0] == '#' {
+			continue
+		}
+		ip := core.Trim(entry)
+		if _, dup := o.lists[ip]; dup {
+			dups++
+			continue
+		}
+		o.lists[ip] = fileName
+	}
+	entries = nil
+	log.Info("%d IPs loaded, %s", len(o.lists), fileName)
+
+	return dups
+}
+
+// readLists rebuilds the operator's in-memory lists map from every
+// non-hidden file in o.Data, dispatching to the reader that matches the
+// operand type (domains, regexps, nets or plain IPs).
+func (o *Operator) readLists() error {
+	// ClearLists takes and releases the lock itself, so it must run
+	// before we Lock below.
+	o.ClearLists()
+
+	var dups uint64
+	// this list is particular to this operator and rule
+	o.Lock()
+	defer o.Unlock()
+	o.lists = make(map[string]interface{})
+
+	expr := filepath.Join(o.Data, "*.*")
+	fileList, err := filepath.Glob(expr)
+	if err != nil {
+		return fmt.Errorf("Error loading domains lists '%s': %s", expr, err)
+	}
+
+	for _, fileName := range fileList {
+		// ignore hidden files
+		name := filepath.Base(fileName)
+		if name[:1] == "." {
+			continue
+		}
+
+		raw, err := ioutil.ReadFile(fileName)
+		if err != nil {
+			log.Warning("Error reading list of IPs (%s): %s", fileName, err)
+			continue
+		}
+
+		// all readers accumulate into the same o.lists map
+		if o.Operand == OpDomainsLists {
+			dups += o.readDomainsList(string(raw), fileName)
+		} else if o.Operand == OpDomainsRegexpLists {
+			dups += o.readRegexpList(string(raw), fileName)
+		} else if o.Operand == OpNetLists {
+			dups += o.readNetList(string(raw), fileName)
+		} else if o.Operand == OpIPLists {
+			dups += o.readIPList(string(raw), fileName)
+		} else {
+			log.Warning("Unknown lists operand type: %s", o.Operand)
+		}
+	}
+	log.Info("%d lists loaded, %d domains, %d duplicated", len(fileList), len(o.lists), dups)
+	return nil
+}
+
+// loadLists starts the background goroutine that loads and keeps the
+// operator's on-disk lists up to date, if it is not running already.
+func (o *Operator) loadLists() {
+	log.Info("loading domains lists: %s, %s, %s", o.Type, o.Operand, o.Data)
+
+	// when loading from disk, we don't use the Operator's constructor, so we need to create this channel
+	if o.exitMonitorChan == nil {
+		o.exitMonitorChan = make(chan bool)
+		o.listsMonitorRunning = true
+		go o.monitorLists()
+	}
+}
diff --git a/daemon/rule/operator_test.go b/daemon/rule/operator_test.go
new file mode 100644
index 0000000..5d7d07b
--- /dev/null
+++ b/daemon/rule/operator_test.go
@@ -0,0 +1,742 @@
+package rule
+
+import (
+ "encoding/json"
+ "fmt"
+ "net"
+ "testing"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/conman"
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/netstat"
+ "github.com/evilsocket/opensnitch/daemon/procmon"
+)
+
+// Shared fixtures for the operator tests: a fake process and netstat
+// entry wired into one connection that the individual tests mutate and
+// then reset via restoreConnection().
+var (
+	defaultProcPath = "/usr/bin/opensnitchd"
+	defaultProcArgs = "-rules-path /etc/opensnitchd/rules/"
+	defaultDstHost  = "opensnitch.io"
+	defaultDstPort  = uint(443)
+	defaultDstIP    = "185.53.178.14"
+	defaultUserID   = 666
+
+	netEntry = &netstat.Entry{
+		UserId: defaultUserID,
+	}
+
+	proc = &procmon.Process{
+		ID:   12345,
+		Path: defaultProcPath,
+		Args: []string{"-rules-path", "/etc/opensnitchd/rules/"},
+	}
+
+	conn = &conman.Connection{
+		Protocol: "TCP",
+		SrcPort:  66666,
+		SrcIP:    net.ParseIP("192.168.1.111"),
+		DstIP:    net.ParseIP(defaultDstIP),
+		DstPort:  defaultDstPort,
+		DstHost:  defaultDstHost,
+		Process:  proc,
+		Entry:    netEntry,
+	}
+)
+
+// compileListOperators compiles every sub-operator of a List operator,
+// reporting any compile failure through t.
+func compileListOperators(list *[]Operator, t *testing.T) {
+	for i := range *list {
+		if err := (*list)[i].Compile(); err != nil {
+			t.Error("NewOperator List, Compile() subitem error:", err)
+		}
+	}
+}
+
+// unmarshalListData decodes the JSON-encoded sub-operator list used by
+// List-type rules, flagging decode errors through t and returning nil.
+func unmarshalListData(data string, t *testing.T) (op *[]Operator) {
+	err := json.Unmarshal([]byte(data), &op)
+	if err == nil {
+		return op
+	}
+	t.Error("Error unmarshalling list data:", err, data)
+	return nil
+}
+
+// restoreConnection resets the fields of the shared test connection that
+// the individual tests mutate, back to their default fixture values.
+func restoreConnection() {
+	conn.Entry.UserId = defaultUserID
+	conn.DstPort = defaultDstPort
+	conn.DstHost = defaultDstHost
+	conn.Process.Path = defaultProcPath
+}
+
+// TestNewOperatorSimple covers the "simple" operator type against every
+// supported operand (true, proc.id, proc.path, dest.host, proc args,
+// dest.ip, user.id), in both case-sensitive and -insensitive modes.
+func TestNewOperatorSimple(t *testing.T) {
+	t.Log("Test NewOperator() simple")
+	var list []Operator
+
+	// the "true" operand matches unconditionally, even with a nil conn
+	opSimple, err := NewOperator(Simple, false, OpTrue, "", list)
+	if err != nil {
+		t.Error("NewOperator simple.err should be nil: ", err)
+		t.Fail()
+	}
+	if err = opSimple.Compile(); err != nil {
+		t.Fail()
+	}
+	if opSimple.Match(nil) == false {
+		t.Error("Test NewOperator() simple.case-insensitive doesn't match")
+		t.Fail()
+	}
+
+	t.Run("Operator Simple proc.id", func(t *testing.T) {
+		// proc.id not sensitive
+		opSimple, err = NewOperator(Simple, false, OpProcessID, "12345", list)
+		if err != nil {
+			t.Error("NewOperator simple.case-insensitive.proc.id err should be nil: ", err)
+			t.Fail()
+		}
+		if err = opSimple.Compile(); err != nil {
+			t.Error("NewOperator simple.case-insensitive.proc.id Compile() err:", err)
+			t.Fail()
+		}
+		if opSimple.Match(conn) == false {
+			t.Error("Test NewOperator() simple proc.id doesn't match")
+			t.Fail()
+		}
+	})
+
+	opSimple, err = NewOperator(Simple, false, OpProcessPath, defaultProcPath, list)
+	t.Run("Operator Simple proc.path case-insensitive", func(t *testing.T) {
+		// proc path not sensitive
+		if err != nil {
+			t.Error("NewOperator simple proc.path err should be nil: ", err)
+			t.Fail()
+		}
+		if err = opSimple.Compile(); err != nil {
+			t.Error("NewOperator simple.case-insensitive.proc.path Compile() err:", err)
+			t.Fail()
+		}
+		if opSimple.Match(conn) == false {
+			t.Error("Test NewOperator() simple proc.path doesn't match")
+			t.Fail()
+		}
+	})
+
+	t.Run("Operator Simple proc.path sensitive", func(t *testing.T) {
+		// proc path sensitive
+		opSimple.Sensitive = true
+		conn.Process.Path = "/usr/bin/OpenSnitchd"
+		if opSimple.Match(conn) == true {
+			t.Error("Test NewOperator() simple proc.path sensitive match")
+			t.Fail()
+		}
+	})
+
+	opSimple, err = NewOperator(Simple, false, OpDstHost, defaultDstHost, list)
+	t.Run("Operator Simple con.dstHost case-insensitive", func(t *testing.T) {
+		// proc dst host not sensitive
+		if err != nil {
+			t.Error("NewOperator simple proc.path err should be nil: ", err)
+			t.Fail()
+		}
+		if err = opSimple.Compile(); err != nil {
+			t.Error("NewOperator simple.case-insensitive.dstHost Compile() err:", err)
+			t.Fail()
+		}
+		if opSimple.Match(conn) == false {
+			t.Error("Test NewOperator() simple.conn.dstHost.not-sensitive doesn't match")
+			t.Fail()
+		}
+	})
+
+	t.Run("Operator Simple con.dstHost case-insensitive different host", func(t *testing.T) {
+		conn.DstHost = "www.opensnitch.io"
+		if opSimple.Match(conn) == true {
+			t.Error("Test NewOperator() simple.conn.dstHost.not-sensitive doesn't MATCH")
+			t.Fail()
+		}
+	})
+
+	t.Run("Operator Simple con.dstHost sensitive", func(t *testing.T) {
+		// proc dst host sensitive
+		opSimple, err = NewOperator(Simple, true, OpDstHost, "OpEnsNitCh.io", list)
+		if err != nil {
+			t.Error("NewOperator simple.dstHost.sensitive err should be nil: ", err)
+			t.Fail()
+		}
+		if err = opSimple.Compile(); err != nil {
+			t.Error("NewOperator simple.dstHost.sensitive Compile() err:", err)
+			t.Fail()
+		}
+		conn.DstHost = "OpEnsNitCh.io"
+		if opSimple.Match(conn) == false {
+			t.Error("Test NewOperator() simple.dstHost.sensitive doesn't match")
+			t.Fail()
+		}
+	})
+
+	t.Run("Operator Simple proc.args case-insensitive", func(t *testing.T) {
+		// proc args case-insensitive
+		opSimple, err = NewOperator(Simple, false, OpProcessCmd, defaultProcArgs, list)
+		if err != nil {
+			t.Error("NewOperator simple proc.args err should be nil: ", err)
+			t.Fail()
+		}
+		if err = opSimple.Compile(); err != nil {
+			t.Error("NewOperator simple proc.args Compile() err: ", err)
+			t.Fail()
+		}
+		if opSimple.Match(conn) == false {
+			t.Error("Test NewOperator() simple proc.args doesn't match")
+			t.Fail()
+		}
+	})
+
+	t.Run("Operator Simple con.dstIp case-insensitive", func(t *testing.T) {
+		// proc dstIp case-insensitive
+		opSimple, err = NewOperator(Simple, false, OpDstIP, defaultDstIP, list)
+		if err != nil {
+			t.Error("NewOperator simple conn.dstip.err should be nil: ", err)
+			t.Fail()
+		}
+		if err = opSimple.Compile(); err != nil {
+			t.Error("NewOperator simple con.dstIp Compile() err: ", err)
+			t.Fail()
+		}
+		if opSimple.Match(conn) == false {
+			t.Error("Test NewOperator() simple conn.dstip doesn't match")
+			t.Fail()
+		}
+	})
+
+	t.Run("Operator Simple UserId case-insensitive", func(t *testing.T) {
+		// conn.uid case-insensitive
+		opSimple, err = NewOperator(Simple, false, OpUserID, fmt.Sprint(defaultUserID), list)
+		if err != nil {
+			t.Error("NewOperator simple conn.userid.err should be nil: ", err)
+			t.Fail()
+		}
+		if err = opSimple.Compile(); err != nil {
+			t.Error("NewOperator simple UserId Compile() err: ", err)
+			t.Fail()
+		}
+		if opSimple.Match(conn) == false {
+			t.Error("Test NewOperator() simple conn.userid doesn't match")
+			t.Fail()
+		}
+	})
+
+	restoreConnection()
+}
+
+// TestNewOperatorNetwork checks CIDR matching: the default dest IP
+// (185.53.178.14) must fall inside 185.53.178.14/24 and outside
+// 8.8.8.8/24.
+func TestNewOperatorNetwork(t *testing.T) {
+	t.Log("Test NewOperator() network")
+	var dummyList []Operator
+
+	opSimple, err := NewOperator(Network, false, OpDstNetwork, "185.53.178.14/24", dummyList)
+	if err != nil {
+		t.Error("NewOperator network.err should be nil: ", err)
+		t.Fail()
+	}
+	if err = opSimple.Compile(); err != nil {
+		t.Fail()
+	}
+	if opSimple.Match(conn) == false {
+		t.Error("Test NewOperator() network doesn't match")
+		t.Fail()
+	}
+
+	// a non-matching range must not match
+	opSimple, err = NewOperator(Network, false, OpDstNetwork, "8.8.8.8/24", dummyList)
+	if err != nil {
+		t.Error("NewOperator network.err should be nil: ", err)
+		t.Fail()
+	}
+	if err = opSimple.Compile(); err != nil {
+		t.Fail()
+	}
+	if opSimple.Match(conn) == true {
+		t.Error("Test NewOperator() network doesn't match:", conn.DstIP)
+		t.Fail()
+	}
+
+	restoreConnection()
+}
+
+// TestNewOperatorRegexp checks a valid regexp operator ("^TCP$") against
+// the default connection's protocol.
+func TestNewOperatorRegexp(t *testing.T) {
+	t.Log("Test NewOperator() regexp")
+	var dummyList []Operator
+
+	opRE, err := NewOperator(Regexp, false, OpProto, "^TCP$", dummyList)
+	if err != nil {
+		t.Error("NewOperator regexp.err should be nil: ", err)
+		t.Fail()
+	}
+	if err = opRE.Compile(); err != nil {
+		t.Fail()
+	}
+	if opRE.Match(conn) == false {
+		t.Error("Test NewOperator() regexp doesn't match")
+		t.Fail()
+	}
+
+	restoreConnection()
+}
+
+// TestNewOperatorInvalidRegexp verifies that Compile() reports an error
+// for a syntactically invalid regexp (unbalanced parenthesis), while the
+// constructor itself still succeeds.
+func TestNewOperatorInvalidRegexp(t *testing.T) {
+	t.Log("Test NewOperator() invalid regexp")
+	var dummyList []Operator
+
+	opRE, err := NewOperator(Regexp, false, OpProto, "^TC(P$", dummyList)
+	if err != nil {
+		t.Error("NewOperator regexp.err should be nil: ", err)
+		t.Fail()
+	}
+	if err = opRE.Compile(); err == nil {
+		t.Error("NewOperator() invalid regexp. It should fail: ", err)
+		t.Fail()
+	}
+
+	restoreConnection()
+}
+
+// TestNewOperatorRegexpSensitive checks regexp matching with and without
+// case sensitivity against a mixed-case process path.
+func TestNewOperatorRegexpSensitive(t *testing.T) {
+	t.Log("Test NewOperator() regexp sensitive")
+	var dummyList []Operator
+
+	var sensitive Sensitive
+	sensitive = true
+
+	conn.Process.Path = "/tmp/cUrL"
+
+	// sensitive: exact-case pattern must match the exact-case path
+	opRE, err := NewOperator(Regexp, sensitive, OpProcessPath, "^/tmp/cUrL$", dummyList)
+	if err != nil {
+		t.Error("NewOperator regexp.case-sensitive.err should be nil: ", err)
+		t.Fail()
+	}
+	if err = opRE.Compile(); err != nil {
+		t.Fail()
+	}
+	if opRE.Match(conn) == false {
+		t.Error("Test NewOperator() RE sensitive doesn't match:", conn.Process.Path)
+		t.Fail()
+	}
+
+	t.Run("Operator regexp proc.path case-sensitive", func(t *testing.T) {
+		conn.Process.Path = "/tmp/curl"
+		if opRE.Match(conn) == true {
+			t.Error("Test NewOperator() RE sensitive match:", conn.Process.Path)
+			t.Fail()
+		}
+	})
+
+	// insensitive: the same pattern must now match the lower-case path
+	opRE, err = NewOperator(Regexp, !sensitive, OpProcessPath, "^/tmp/cUrL$", dummyList)
+	if err != nil {
+		t.Error("NewOperator regexp.case-insensitive.err should be nil: ", err)
+		t.Fail()
+	}
+	if err = opRE.Compile(); err != nil {
+		t.Fail()
+	}
+	if opRE.Match(conn) == false {
+		t.Error("Test NewOperator() RE not sensitive match:", conn.Process.Path)
+		t.Fail()
+	}
+
+	restoreConnection()
+}
+
+func TestNewOperatorList(t *testing.T) {
+ t.Log("Test NewOperator() List")
+ var list []Operator
+ listData := `[{"type": "simple", "operand": "dest.ip", "data": "185.53.178.14", "sensitive": false}, {"type": "simple", "operand": "dest.port", "data": "443", "sensitive": false}]`
+
+ // simple list
+ opList, err := NewOperator(List, false, OpProto, listData, list)
+ t.Run("Operator List simple case-insensitive", func(t *testing.T) {
+ if err != nil {
+ t.Error("NewOperator list.regexp.err should be nil: ", err)
+ t.Fail()
+ }
+ if err = opList.Compile(); err != nil {
+ t.Fail()
+ }
+ opList.List = *unmarshalListData(opList.Data, t)
+ compileListOperators(&opList.List, t)
+ if opList.Match(conn) == false {
+ t.Error("Test NewOperator() list simple doesn't match")
+ t.Fail()
+ }
+ })
+
+ t.Run("Operator List regexp case-insensitive", func(t *testing.T) {
+ // list with regexp, case-insensitive
+ listData = `[{"type": "regexp", "operand": "process.path", "data": "^/usr/bin/.*", "sensitive": false},{"type": "simple", "operand": "dest.ip", "data": "185.53.178.14", "sensitive": false}, {"type": "simple", "operand": "dest.port", "data": "443", "sensitive": false}]`
+ opList.List = *unmarshalListData(listData, t)
+ compileListOperators(&opList.List, t)
+ if err = opList.Compile(); err != nil {
+ t.Fail()
+ }
+ if opList.Match(conn) == false {
+ t.Error("Test NewOperator() list regexp doesn't match")
+ t.Fail()
+ }
+ })
+
+ t.Run("Operator List regexp case-sensitive", func(t *testing.T) {
+ // list with regexp, case-sensitive
+ // "data": "^/usr/BiN/.*" must match conn.Process.Path (sensitive)
+ listData = `[{"type": "regexp", "operand": "process.path", "data": "^/usr/BiN/.*", "sensitive": false},{"type": "simple", "operand": "dest.ip", "data": "185.53.178.14", "sensitive": false}, {"type": "simple", "operand": "dest.port", "data": "443", "sensitive": false}]`
+ opList.List = *unmarshalListData(listData, t)
+ compileListOperators(&opList.List, t)
+ conn.Process.Path = "/usr/BiN/opensnitchd"
+ opList.Sensitive = true
+ if err = opList.Compile(); err != nil {
+ t.Fail()
+ }
+ if opList.Match(conn) == false {
+ t.Error("Test NewOperator() list.regexp.sensitive doesn't match:", conn.Process.Path)
+ t.Fail()
+ }
+ })
+
+ t.Run("Operator List regexp case-insensitive 2", func(t *testing.T) {
+ // "data": "^/usr/BiN/.*" must still match conn.Process.Path (insensitive comparison)
+ opList.Sensitive = false
+ conn.Process.Path = "/USR/BiN/opensnitchd"
+ if err = opList.Compile(); err != nil {
+ t.Fail()
+ }
+ if opList.Match(conn) == false {
+ t.Error("Test NewOperator() list.regexp.insensitive match:", conn.Process.Path)
+ t.Fail()
+ }
+ })
+
+ t.Run("Operator List regexp case-insensitive 3", func(t *testing.T) {
+ // "data": "^/usr/BiN/.*" must match conn.Process.Path (insensitive)
+ opList.Sensitive = false
+ conn.Process.Path = "/USR/bin/opensnitchd"
+ if err = opList.Compile(); err != nil {
+ t.Fail()
+ }
+ if opList.Match(conn) == false {
+ t.Error("Test NewOperator() list.regexp.insensitive match:", conn.Process.Path)
+ t.Fail()
+ }
+ })
+
+ restoreConnection()
+}
+
+func TestNewOperatorListsSimple(t *testing.T) {
+ t.Log("Test NewOperator() Lists simple")
+ var dummyList []Operator
+
+ opLists, err := NewOperator(Lists, false, OpDomainsLists, "testdata/lists/domains/", dummyList)
+ if err != nil {
+ t.Error("NewOperator Lists, shouldn't be nil: ", err)
+ t.Fail()
+ }
+ if err = opLists.Compile(); err != nil {
+ t.Error("NewOperator Lists, Compile() error:", err)
+ }
+ time.Sleep(time.Second)
+ t.Log("testing Lists, DstHost:", conn.DstHost)
+ // The list contains 4 lines, 1 is a comment and there's a domain duplicated.
+ // We should only load lines that start with 0.0.0.0 or 127.0.0.1
+ if len(opLists.lists) != 2 {
+ t.Error("NewOperator Lists, number of domains error:", opLists.lists, len(opLists.lists))
+ }
+ if opLists.Match(conn) == false {
+ t.Error("Test NewOperator() lists doesn't match")
+ }
+
+ opLists.StopMonitoringLists()
+ time.Sleep(time.Second)
+ opLists.Lock()
+ if len(opLists.lists) != 0 {
+ t.Error("NewOperator Lists, number should be 0 after stop:", opLists.lists, len(opLists.lists))
+ }
+ opLists.Unlock()
+
+ restoreConnection()
+}
+
+func TestNewOperatorListsIPs(t *testing.T) {
+ t.Log("Test NewOperator() Lists domains_regexp")
+
+ var subOp *Operator
+ var list []Operator
+ listData := `[{"type": "simple", "operand": "user.id", "data": "666", "sensitive": false}, {"type": "lists", "operand": "lists.ips", "data": "testdata/lists/ips/", "sensitive": false}]`
+
+ opLists, err := NewOperator(List, false, OpList, listData, list)
+ if err != nil {
+ t.Error("NewOperator Lists domains_regexp, shouldn't be nil: ", err)
+ t.Fail()
+ }
+ if err := opLists.Compile(); err != nil {
+ t.Error("NewOperator Lists domains_regexp, Compile() error:", err)
+ }
+ opLists.List = *unmarshalListData(opLists.Data, t)
+ for i := 0; i < len(opLists.List); i++ {
+ if err := opLists.List[i].Compile(); err != nil {
+ t.Error("NewOperator Lists domains_regexp, Compile() subitem error:", err)
+ }
+ if opLists.List[i].Type == Lists {
+ subOp = &opLists.List[i]
+ }
+ }
+
+ time.Sleep(time.Second)
+ if opLists.Match(conn) == false {
+ t.Error("Test NewOperator() Lists domains_regexp, doesn't match:", conn.DstHost)
+ }
+
+ subOp.Lock()
+ listslen := len(subOp.lists)
+ subOp.Unlock()
+ if listslen != 2 {
+ t.Error("NewOperator Lists domains_regexp, number of domains error:", subOp.lists)
+ }
+
+ //t.Log("checking lists.domains_regexp:", tries, conn.DstHost)
+ if opLists.Match(conn) == false {
+ // we don't care about if it matches, we're testing race conditions
+ t.Log("Test NewOperator() Lists domains_regexp, doesn't match:", conn.DstHost)
+ }
+
+ subOp.StopMonitoringLists()
+ time.Sleep(time.Second)
+ subOp.Lock()
+ if len(subOp.lists) != 0 {
+ t.Error("NewOperator Lists number should be 0:", subOp.lists, len(subOp.lists))
+ }
+ subOp.Unlock()
+
+ restoreConnection()
+}
+
+func TestNewOperatorListsNETs(t *testing.T) {
+ t.Log("Test NewOperator() Lists domains_regexp")
+
+ var subOp *Operator
+ var list []Operator
+ listData := `[{"type": "simple", "operand": "user.id", "data": "666", "sensitive": false}, {"type": "lists", "operand": "lists.nets", "data": "testdata/lists/nets/", "sensitive": false}]`
+
+ opLists, err := NewOperator(List, false, OpList, listData, list)
+ if err != nil {
+ t.Error("NewOperator Lists domains_regexp, shouldn't be nil: ", err)
+ t.Fail()
+ }
+ if err := opLists.Compile(); err != nil {
+ t.Error("NewOperator Lists domains_regexp, Compile() error:", err)
+ }
+ opLists.List = *unmarshalListData(opLists.Data, t)
+ for i := 0; i < len(opLists.List); i++ {
+ if err := opLists.List[i].Compile(); err != nil {
+ t.Error("NewOperator Lists domains_regexp, Compile() subitem error:", err)
+ }
+ if opLists.List[i].Type == Lists {
+ subOp = &opLists.List[i]
+ }
+ }
+
+ time.Sleep(time.Second)
+ if opLists.Match(conn) == false {
+ t.Error("Test NewOperator() Lists domains_regexp, doesn't match:", conn.DstHost)
+ }
+
+ subOp.Lock()
+ listslen := len(subOp.lists)
+ subOp.Unlock()
+ if listslen != 2 {
+ t.Error("NewOperator Lists domains_regexp, number of domains error:", subOp.lists)
+ }
+
+ //t.Log("checking lists.domains_regexp:", tries, conn.DstHost)
+ if opLists.Match(conn) == false {
+ // we don't care about if it matches, we're testing race conditions
+ t.Log("Test NewOperator() Lists domains_regexp, doesn't match:", conn.DstHost)
+ }
+
+ subOp.StopMonitoringLists()
+ time.Sleep(time.Second)
+ subOp.Lock()
+ if len(subOp.lists) != 0 {
+ t.Error("NewOperator Lists number should be 0:", subOp.lists, len(subOp.lists))
+ }
+ subOp.Unlock()
+
+ restoreConnection()
+}
+
+func TestNewOperatorListsComplex(t *testing.T) {
+ t.Log("Test NewOperator() Lists complex")
+ var subOp *Operator
+ var list []Operator
+ listData := `[{"type": "simple", "operand": "user.id", "data": "666", "sensitive": false}, {"type": "lists", "operand": "lists.domains", "data": "testdata/lists/domains/", "sensitive": false}]`
+
+ opLists, err := NewOperator(List, false, OpList, listData, list)
+ if err != nil {
+ t.Error("NewOperator Lists complex, shouldn't be nil: ", err)
+ t.Fail()
+ }
+ if err := opLists.Compile(); err != nil {
+ t.Error("NewOperator Lists complex, Compile() error:", err)
+ }
+ opLists.List = *unmarshalListData(opLists.Data, t)
+ for i := 0; i < len(opLists.List); i++ {
+ if err := opLists.List[i].Compile(); err != nil {
+ t.Error("NewOperator Lists complex, Compile() subitem error:", err)
+ }
+ if opLists.List[i].Type == Lists {
+ subOp = &opLists.List[i]
+ }
+ }
+ time.Sleep(time.Second)
+ subOp.Lock()
+ if len(subOp.lists) != 2 {
+ t.Error("NewOperator Lists complex, number of domains error:", subOp.lists)
+ }
+ subOp.Unlock()
+ if opLists.Match(conn) == false {
+ t.Error("Test NewOperator() Lists complex, doesn't match")
+ }
+
+ subOp.StopMonitoringLists()
+ time.Sleep(time.Second)
+ subOp.Lock()
+ if len(subOp.lists) != 0 {
+ t.Error("NewOperator Lists number should be 0:", subOp.lists, len(subOp.lists))
+ }
+ subOp.Unlock()
+
+ restoreConnection()
+}
+
+func TestNewOperatorListsDomainsRegexp(t *testing.T) {
+ t.Log("Test NewOperator() Lists domains_regexp")
+
+ var subOp *Operator
+ var list []Operator
+ listData := `[{"type": "simple", "operand": "user.id", "data": "666", "sensitive": false}, {"type": "lists", "operand": "lists.domains_regexp", "data": "testdata/lists/regexp/", "sensitive": false}]`
+
+ opLists, err := NewOperator(List, false, OpList, listData, list)
+ if err != nil {
+ t.Error("NewOperator Lists domains_regexp, shouldn't be nil: ", err)
+ t.Fail()
+ }
+ if err := opLists.Compile(); err != nil {
+ t.Error("NewOperator Lists domains_regexp, Compile() error:", err)
+ }
+ opLists.List = *unmarshalListData(opLists.Data, t)
+ for i := 0; i < len(opLists.List); i++ {
+ if err := opLists.List[i].Compile(); err != nil {
+ t.Error("NewOperator Lists domains_regexp, Compile() subitem error:", err)
+ }
+ if opLists.List[i].Type == Lists {
+ subOp = &opLists.List[i]
+ }
+ }
+
+ time.Sleep(time.Second)
+ if opLists.Match(conn) == false {
+ t.Error("Test NewOperator() Lists domains_regexp, doesn't match:", conn.DstHost)
+ }
+
+ subOp.Lock()
+ listslen := len(subOp.lists)
+ subOp.Unlock()
+ if listslen != 2 {
+ t.Error("NewOperator Lists domains_regexp, number of domains error:", subOp.lists)
+ }
+
+ //t.Log("checking lists.domains_regexp:", tries, conn.DstHost)
+ if opLists.Match(conn) == false {
+ // we don't care about if it matches, we're testing race conditions
+ t.Log("Test NewOperator() Lists domains_regexp, doesn't match:", conn.DstHost)
+ }
+
+ subOp.StopMonitoringLists()
+ time.Sleep(time.Second)
+ subOp.Lock()
+ if len(subOp.lists) != 0 {
+ t.Error("NewOperator Lists number should be 0:", subOp.lists, len(subOp.lists))
+ }
+ subOp.Unlock()
+
+ restoreConnection()
+}
+
+// Must be launched with -race to test that we don't cause leaks
+// Race occurred on operator.go:241 reListCmp().MatchString()
+// fixed here: 53419fe
+func TestRaceNewOperatorListsDomainsRegexp(t *testing.T) {
+ t.Log("Test NewOperator() Lists domains_regexp")
+
+ var subOp *Operator
+ var list []Operator
+ listData := `[{"type": "simple", "operand": "user.id", "data": "666", "sensitive": false}, {"type": "lists", "operand": "lists.domains_regexp", "data": "testdata/lists/regexp/", "sensitive": false}]`
+
+ opLists, err := NewOperator(List, false, OpList, listData, list)
+ if err != nil {
+ t.Error("NewOperator Lists domains_regexp, shouldn't be nil: ", err)
+ t.Fail()
+ }
+ if err := opLists.Compile(); err != nil {
+ t.Error("NewOperator Lists domains_regexp, Compile() error:", err)
+ }
+ opLists.List = *unmarshalListData(opLists.Data, t)
+ for i := 0; i < len(opLists.List); i++ {
+ if err := opLists.List[i].Compile(); err != nil {
+ t.Error("NewOperator Lists domains_regexp, Compile() subitem error:", err)
+ }
+ if opLists.List[i].Type == Lists {
+ subOp = &opLists.List[i]
+ }
+ }
+
+ // touch domains list in background, to force a reload.
+ go func() {
+ touches := 1000
+ for {
+ if touches < 0 {
+ break
+ }
+ core.Exec("/bin/touch", []string{"testdata/lists/regexp/domainsregexp.txt"})
+ touches--
+ time.Sleep(100 * time.Millisecond)
+ //t.Log("touching:", touches)
+ }
+ }()
+
+ time.Sleep(time.Second)
+
+ subOp.Lock()
+ listslen := len(subOp.lists)
+ subOp.Unlock()
+ if listslen != 2 {
+ t.Error("NewOperator Lists domains_regexp, number of domains error:", subOp.lists)
+ }
+
+ tries := 10000
+ for {
+ if tries < 0 {
+ break
+ }
+ //t.Log("checking lists.domains_regexp:", tries, conn.DstHost)
+ if opLists.Match(conn) == false {
+ // we don't care about if it matches, we're testing race conditions
+ t.Log("Test NewOperator() Lists domains_regexp, doesn't match:", conn.DstHost)
+ }
+
+ tries--
+ time.Sleep(10 * time.Millisecond)
+ }
+
+ subOp.StopMonitoringLists()
+ time.Sleep(time.Second)
+ subOp.Lock()
+ if len(subOp.lists) != 0 {
+ t.Error("NewOperator Lists number should be 0:", subOp.lists, len(subOp.lists))
+ }
+ subOp.Unlock()
+
+ restoreConnection()
+}
diff --git a/daemon/rule/rule.go b/daemon/rule/rule.go
new file mode 100644
index 0000000..b51cf8f
--- /dev/null
+++ b/daemon/rule/rule.go
@@ -0,0 +1,115 @@
+package rule
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/conman"
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/evilsocket/opensnitch/daemon/ui/protocol"
+)
+
+// Action of a rule
+type Action string
+
+// Actions of rules
+const (
+ Allow = Action("allow")
+ Deny = Action("deny")
+ Reject = Action("reject")
+)
+
+// Duration of a rule
+type Duration string
+
+// daemon possible durations
+const (
+ Once = Duration("once")
+ Restart = Duration("until restart")
+ Always = Duration("always")
+)
+
+// Rule represents an action on a connection.
+// The fields match the ones saved as json to disk.
+// If a .json rule file is modified on disk, it's reloaded automatically.
+type Rule struct {
+ Created time.Time `json:"created"`
+ Updated time.Time `json:"updated"`
+ Name string `json:"name"`
+ Enabled bool `json:"enabled"`
+ Precedence bool `json:"precedence"`
+ Action Action `json:"action"`
+ Duration Duration `json:"duration"`
+ Operator Operator `json:"operator"`
+}
+
+// Create creates a new rule object with the specified parameters.
+func Create(name string, enabled bool, precedence bool, action Action, duration Duration, op *Operator) *Rule {
+ return &Rule{
+ Created: time.Now(),
+ Enabled: enabled,
+ Precedence: precedence,
+ Name: name,
+ Action: action,
+ Duration: duration,
+ Operator: *op,
+ }
+}
+
+func (r *Rule) String() string {
+ return fmt.Sprintf("%s: if(%s){ %s %s }", r.Name, r.Operator.String(), r.Action, r.Duration)
+}
+
+// Match performs on a connection the checks a Rule has, to determine if it
+// must be allowed or denied.
+func (r *Rule) Match(con *conman.Connection) bool {
+ return r.Operator.Match(con)
+}
+
+// Deserialize translates back the rule received to a Rule object
+func Deserialize(reply *protocol.Rule) (*Rule, error) {
+ if reply.Operator == nil {
+ log.Warning("Deserialize rule, Operator nil")
+ return nil, fmt.Errorf("invalid operator")
+ }
+ operator, err := NewOperator(
+ Type(reply.Operator.Type),
+ Sensitive(reply.Operator.Sensitive),
+ Operand(reply.Operator.Operand),
+ reply.Operator.Data,
+ make([]Operator, 0),
+ )
+ if err != nil {
+ log.Warning("Deserialize rule, NewOperator() error: %s", err)
+ return nil, err
+ }
+
+ return Create(
+ reply.Name,
+ reply.Enabled,
+ reply.Precedence,
+ Action(reply.Action),
+ Duration(reply.Duration),
+ operator,
+ ), nil
+}
+
+// Serialize translates a Rule to the protocol object
+func (r *Rule) Serialize() *protocol.Rule {
+ if r == nil {
+ return nil
+ }
+ return &protocol.Rule{
+ Name: string(r.Name),
+ Enabled: bool(r.Enabled),
+ Precedence: bool(r.Precedence),
+ Action: string(r.Action),
+ Duration: string(r.Duration),
+ Operator: &protocol.Operator{
+ Type: string(r.Operator.Type),
+ Sensitive: bool(r.Operator.Sensitive),
+ Operand: string(r.Operator.Operand),
+ Data: string(r.Operator.Data),
+ },
+ }
+}
diff --git a/daemon/rule/rule_test.go b/daemon/rule/rule_test.go
new file mode 100644
index 0000000..cc9017a
--- /dev/null
+++ b/daemon/rule/rule_test.go
@@ -0,0 +1,47 @@
+package rule
+
+import "testing"
+
+func TestCreate(t *testing.T) {
+ t.Log("Test: Create rule")
+
+ var list []Operator
+ oper, _ := NewOperator(Simple, false, OpTrue, "", list)
+ r := Create("000-test-name", true, false, Allow, Once, oper)
+ t.Run("New rule must not be nil", func(t *testing.T) {
+ if r == nil {
+ t.Error("Create() returned nil")
+ t.Fail()
+ }
+ })
+ t.Run("Rule name must be 000-test-name", func(t *testing.T) {
+ if r.Name != "000-test-name" {
+ t.Error("Rule name error:", r.Name)
+ t.Fail()
+ }
+ })
+ t.Run("Rule must be enabled", func(t *testing.T) {
+ if r.Enabled == false {
+ t.Error("Rule Enabled is false:", r)
+ t.Fail()
+ }
+ })
+ t.Run("Rule Precedence must be false", func(t *testing.T) {
+ if r.Precedence == true {
+ t.Error("Rule Precedence is true:", r)
+ t.Fail()
+ }
+ })
+ t.Run("Rule Action must be Allow", func(t *testing.T) {
+ if r.Action != Allow {
+ t.Error("Rule Action is not Allow:", r.Action)
+ t.Fail()
+ }
+ })
+ t.Run("Rule Duration should be Once", func(t *testing.T) {
+ if r.Duration != Once {
+ t.Error("Rule Duration is not Once:", r.Duration)
+ t.Fail()
+ }
+ })
+}
diff --git a/daemon/rule/testdata/000-allow-chrome.json b/daemon/rule/testdata/000-allow-chrome.json
new file mode 100644
index 0000000..db2c811
--- /dev/null
+++ b/daemon/rule/testdata/000-allow-chrome.json
@@ -0,0 +1,16 @@
+{
+ "created": "2020-12-13T18:06:52.209804547+01:00",
+ "updated": "2020-12-13T18:06:52.209857713+01:00",
+ "name": "000-allow-chrome",
+ "enabled": true,
+ "precedence": true,
+ "action": "allow",
+ "duration": "always",
+ "operator": {
+ "type": "simple",
+ "operand": "process.path",
+ "sensitive": false,
+ "data": "/opt/google/chrome/chrome",
+ "list": []
+ }
+}
\ No newline at end of file
diff --git a/daemon/rule/testdata/001-deny-chrome.json b/daemon/rule/testdata/001-deny-chrome.json
new file mode 100644
index 0000000..27c266c
--- /dev/null
+++ b/daemon/rule/testdata/001-deny-chrome.json
@@ -0,0 +1,16 @@
+{
+ "created": "2020-12-13T17:54:49.067148304+01:00",
+ "updated": "2020-12-13T17:54:49.067213602+01:00",
+ "name": "001-deny-chrome",
+ "enabled": true,
+ "precedence": false,
+ "action": "deny",
+ "duration": "always",
+ "operator": {
+ "type": "simple",
+ "operand": "process.path",
+ "sensitive": false,
+ "data": "/opt/google/chrome/chrome",
+ "list": []
+ }
+}
\ No newline at end of file
diff --git a/daemon/rule/testdata/invalid-regexp-list.json b/daemon/rule/testdata/invalid-regexp-list.json
new file mode 100644
index 0000000..bd8973f
--- /dev/null
+++ b/daemon/rule/testdata/invalid-regexp-list.json
@@ -0,0 +1,31 @@
+{
+ "created": "2020-12-13T18:06:52.209804547+01:00",
+ "updated": "2020-12-13T18:06:52.209857713+01:00",
+ "name": "invalid-regexp-list",
+ "enabled": true,
+ "precedence": true,
+ "action": "allow",
+ "duration": "always",
+ "operator": {
+ "type": "list",
+ "operand": "list",
+ "sensitive": false,
+ "data": "[{\"type\": \"regexp\", \"operand\": \"process.path\", \"sensitive\": false, \"data\": \"^(/di(rmngr$\"}, {\"type\": \"simple\", \"operand\": \"dest.port\", \"data\": \"53\", \"sensitive\": false}]",
+ "list": [
+ {
+ "type": "regexp",
+ "operand": "process.path",
+ "sensitive": false,
+ "data": "^(/di(rmngr)$",
+ "list": null
+ },
+ {
+ "type": "simple",
+ "operand": "dest.port",
+ "sensitive": false,
+ "data": "53",
+ "list": null
+ }
+ ]
+ }
+}
diff --git a/daemon/rule/testdata/invalid-regexp.json b/daemon/rule/testdata/invalid-regexp.json
new file mode 100644
index 0000000..d296098
--- /dev/null
+++ b/daemon/rule/testdata/invalid-regexp.json
@@ -0,0 +1,16 @@
+{
+ "created": "2020-12-13T18:06:52.209804547+01:00",
+ "updated": "2020-12-13T18:06:52.209857713+01:00",
+ "name": "invalid-regexp",
+ "enabled": true,
+ "precedence": true,
+ "action": "allow",
+ "duration": "always",
+ "operator": {
+ "type": "regexp",
+ "operand": "process.path",
+ "sensitive": false,
+ "data": "/opt/((.*)google/chrome/chrome",
+ "list": []
+ }
+}
diff --git a/daemon/rule/testdata/lists/domains/domainlists.txt b/daemon/rule/testdata/lists/domains/domainlists.txt
new file mode 100644
index 0000000..6e2f3e2
--- /dev/null
+++ b/daemon/rule/testdata/lists/domains/domainlists.txt
@@ -0,0 +1,4 @@
+# this line must be ignored, 0.0.0.0 www.test.org
+0.0.0.0 www.test.org
+127.0.0.1 www.test.org
+0.0.0.0 opensnitch.io
diff --git a/daemon/rule/testdata/lists/ips/ips.txt b/daemon/rule/testdata/lists/ips/ips.txt
new file mode 100644
index 0000000..6514d30
--- /dev/null
+++ b/daemon/rule/testdata/lists/ips/ips.txt
@@ -0,0 +1,7 @@
+# this line must be ignored, 0.0.0.0 www.test.org
+
+# empty lines are also ignored
+1.1.1.1
+185.53.178.14
+# duplicated entries should be ignored
+1.1.1.1
diff --git a/daemon/rule/testdata/lists/nets/nets.txt b/daemon/rule/testdata/lists/nets/nets.txt
new file mode 100644
index 0000000..8041c92
--- /dev/null
+++ b/daemon/rule/testdata/lists/nets/nets.txt
@@ -0,0 +1,8 @@
+# this line must be ignored, 0.0.0.0 www.test.org
+
+# empty lines are also ignored
+1.1.1.0/24
+185.53.178.0/24
+# duplicated entries should be ignored
+1.1.1.0/24
+
diff --git a/daemon/rule/testdata/lists/regexp/domainsregexp.txt b/daemon/rule/testdata/lists/regexp/domainsregexp.txt
new file mode 100644
index 0000000..85ab3e9
--- /dev/null
+++ b/daemon/rule/testdata/lists/regexp/domainsregexp.txt
@@ -0,0 +1,4 @@
+# this line must be ignored, 0.0.0.0 www.test.org
+www.test.org
+www.test.org
+opensnitch.io
diff --git a/daemon/rule/testdata/live_reload/test-live-reload-delete.json b/daemon/rule/testdata/live_reload/test-live-reload-delete.json
new file mode 100644
index 0000000..5a4591a
--- /dev/null
+++ b/daemon/rule/testdata/live_reload/test-live-reload-delete.json
@@ -0,0 +1,16 @@
+{
+ "created": "2020-12-13T18:06:52.209804547+01:00",
+ "updated": "2020-12-13T18:06:52.209857713+01:00",
+ "name": "test-live-reload-delete",
+ "enabled": true,
+ "precedence": true,
+ "action": "deny",
+ "duration": "always",
+ "operator": {
+ "type": "simple",
+ "operand": "process.path",
+ "sensitive": false,
+ "data": "/usr/bin/curl",
+ "list": []
+ }
+ }
\ No newline at end of file
diff --git a/daemon/rule/testdata/live_reload/test-live-reload-remove.json b/daemon/rule/testdata/live_reload/test-live-reload-remove.json
new file mode 100644
index 0000000..8f21ed9
--- /dev/null
+++ b/daemon/rule/testdata/live_reload/test-live-reload-remove.json
@@ -0,0 +1,16 @@
+{
+ "created": "2020-12-13T18:06:52.209804547+01:00",
+ "updated": "2020-12-13T18:06:52.209857713+01:00",
+ "name": "test-live-reload-remove",
+ "enabled": true,
+ "precedence": true,
+ "action": "deny",
+ "duration": "always",
+ "operator": {
+ "type": "simple",
+ "operand": "process.path",
+ "sensitive": false,
+ "data": "/usr/bin/curl",
+ "list": []
+ }
+ }
\ No newline at end of file
diff --git a/daemon/statistics/event.go b/daemon/statistics/event.go
new file mode 100644
index 0000000..fe9e9ee
--- /dev/null
+++ b/daemon/statistics/event.go
@@ -0,0 +1,32 @@
+package statistics
+
+import (
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/conman"
+ "github.com/evilsocket/opensnitch/daemon/rule"
+ "github.com/evilsocket/opensnitch/daemon/ui/protocol"
+)
+
+type Event struct {
+ Time time.Time
+ Connection *conman.Connection
+ Rule *rule.Rule
+}
+
+func NewEvent(con *conman.Connection, match *rule.Rule) *Event {
+ return &Event{
+ Time: time.Now(),
+ Connection: con,
+ Rule: match,
+ }
+}
+
+func (e *Event) Serialize() *protocol.Event {
+ return &protocol.Event{
+ Time: e.Time.Format("2006-01-02 15:04:05"),
+ Connection: e.Connection.Serialize(),
+ Rule: e.Rule.Serialize(),
+ Unixnano: e.Time.UnixNano(),
+ }
+}
diff --git a/daemon/statistics/stats.go b/daemon/statistics/stats.go
new file mode 100644
index 0000000..caa71e1
--- /dev/null
+++ b/daemon/statistics/stats.go
@@ -0,0 +1,244 @@
+package statistics
+
+import (
+ "fmt"
+ "sync"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/conman"
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/evilsocket/opensnitch/daemon/rule"
+ "github.com/evilsocket/opensnitch/daemon/ui/protocol"
+)
+
+// StatsConfig holds the stats configuration
+type StatsConfig struct {
+ MaxEvents int `json:"MaxEvents"`
+ MaxStats int `json:"MaxStats"`
+}
+
+type conEvent struct {
+ con *conman.Connection
+ match *rule.Rule
+ wasMissed bool
+}
+
+// Statistics holds the connections and statistics the daemon intercepts.
+// The connections are stored in the Events slice.
+type Statistics struct {
+ sync.RWMutex
+
+ Started time.Time
+ DNSResponses int
+ Connections int
+ Ignored int
+ Accepted int
+ Dropped int
+ RuleHits int
+ RuleMisses int
+ Events []*Event
+ ByProto map[string]uint64
+ ByAddress map[string]uint64
+ ByHost map[string]uint64
+ ByPort map[string]uint64
+ ByUID map[string]uint64
+ ByExecutable map[string]uint64
+
+ rules *rule.Loader
+ jobs chan conEvent
+ // max number of events to keep in the buffer
+ maxEvents int
+ // max number of entries for each By* map
+ maxStats int
+}
+
+// New returns a new Statistics object and initializes the go routines to update the stats.
+func New(rules *rule.Loader) (stats *Statistics) {
+ stats = &Statistics{
+ Started: time.Now(),
+ Events: make([]*Event, 0),
+ ByProto: make(map[string]uint64),
+ ByAddress: make(map[string]uint64),
+ ByHost: make(map[string]uint64),
+ ByPort: make(map[string]uint64),
+ ByUID: make(map[string]uint64),
+ ByExecutable: make(map[string]uint64),
+
+ rules: rules,
+ jobs: make(chan conEvent),
+ maxEvents: 150,
+ maxStats: 25,
+ }
+
+ go stats.eventWorker(0)
+ go stats.eventWorker(1)
+ go stats.eventWorker(2)
+ go stats.eventWorker(3)
+
+ return stats
+}
+
+// SetConfig configures the max events to keep in the backlog before sending
+// the stats to the UI, or while the UI is not connected.
+// if the backlog is full, it'll be shifted by one.
+func (s *Statistics) SetConfig(config StatsConfig) {
+ if config.MaxEvents > 0 {
+ s.maxEvents = config.MaxEvents
+ }
+ if config.MaxStats > 0 {
+ s.maxStats = config.MaxStats
+ }
+}
+
+// OnDNSResponse increases the counter of dns and accepted connections.
+func (s *Statistics) OnDNSResponse() {
+ s.Lock()
+ defer s.Unlock()
+ s.DNSResponses++
+ s.Accepted++
+}
+
+// OnIgnored increases the counter of ignored and accepted connections.
+func (s *Statistics) OnIgnored() {
+ s.Lock()
+ defer s.Unlock()
+ s.Ignored++
+ s.Accepted++
+}
+
+func (s *Statistics) incMap(m *map[string]uint64, key string) {
+ if val, found := (*m)[key]; found == false {
+ // do we have enough space left?
+ nElems := len(*m)
+ if nElems >= s.maxStats {
+ // find the element with less hits
+ nMin := uint64(9999999999)
+ minKey := ""
+ for k, v := range *m {
+ if v < nMin {
+ minKey = k
+ nMin = v
+ }
+ }
+ // remove it
+ if minKey != "" {
+ delete(*m, minKey)
+ }
+ }
+
+ (*m)[key] = 1
+ } else {
+ (*m)[key] = val + 1
+ }
+}
+
+func (s *Statistics) eventWorker(id int) {
+ log.Debug("Stats worker #%d started.", id)
+
+ for true {
+ select {
+ case job := <-s.jobs:
+ s.onConnection(job.con, job.match, job.wasMissed)
+ }
+ }
+}
+
+func (s *Statistics) onConnection(con *conman.Connection, match *rule.Rule, wasMissed bool) {
+ s.Lock()
+ defer s.Unlock()
+
+ s.Connections++
+
+ if wasMissed {
+ s.RuleMisses++
+ } else {
+ s.RuleHits++
+ }
+
+ if wasMissed == false && match.Action == rule.Allow {
+ s.Accepted++
+ } else {
+ s.Dropped++
+ }
+
+ s.incMap(&s.ByProto, con.Protocol)
+ s.incMap(&s.ByAddress, con.DstIP.String())
+ if con.DstHost != "" {
+ s.incMap(&s.ByHost, con.DstHost)
+ }
+ s.incMap(&s.ByPort, fmt.Sprintf("%d", con.DstPort))
+ s.incMap(&s.ByUID, fmt.Sprintf("%d", con.Entry.UserId))
+ s.incMap(&s.ByExecutable, con.Process.Path)
+
+ // if we reached the limit, shift everything back
+ // by one position
+ nEvents := len(s.Events)
+ if nEvents == s.maxEvents {
+ s.Events = s.Events[1:]
+ }
+ if wasMissed {
+ return
+ }
+ s.Events = append(s.Events, NewEvent(con, match))
+}
+
+// OnConnectionEvent sends the details of a new connection throughout a channel,
+// in order to add the connection to the stats.
+func (s *Statistics) OnConnectionEvent(con *conman.Connection, match *rule.Rule, wasMissed bool) {
+ s.jobs <- conEvent{
+ con: con,
+ match: match,
+ wasMissed: wasMissed,
+ }
+}
+
+func (s *Statistics) serializeEvents() []*protocol.Event {
+ nEvents := len(s.Events)
+ serialized := make([]*protocol.Event, nEvents)
+
+ for i, e := range s.Events {
+ serialized[i] = e.Serialize()
+ }
+
+ return serialized
+}
+
+// emptyStats empties the stats once we've sent them to the GUI.
+// We don't need them anymore here.
+func (s *Statistics) emptyStats() {
+ s.Lock()
+ if len(s.Events) > 0 {
+ s.Events = make([]*Event, 0)
+ }
+ s.Unlock()
+}
+
+// Serialize returns the collected statistics.
+// After return the stats, the Events are emptied, to keep collecting more stats
+// and not miss connections.
+func (s *Statistics) Serialize() *protocol.Statistics {
+ s.Lock()
+ defer s.emptyStats()
+ defer s.Unlock()
+
+ return &protocol.Statistics{
+ DaemonVersion: core.Version,
+ Rules: uint64(s.rules.NumRules()),
+ Uptime: uint64(time.Since(s.Started).Seconds()),
+ DnsResponses: uint64(s.DNSResponses),
+ Connections: uint64(s.Connections),
+ Ignored: uint64(s.Ignored),
+ Accepted: uint64(s.Accepted),
+ Dropped: uint64(s.Dropped),
+ RuleHits: uint64(s.RuleHits),
+ RuleMisses: uint64(s.RuleMisses),
+ Events: s.serializeEvents(),
+ ByProto: s.ByProto,
+ ByAddress: s.ByAddress,
+ ByHost: s.ByHost,
+ ByPort: s.ByPort,
+ ByUid: s.ByUID,
+ ByExecutable: s.ByExecutable,
+ }
+}
diff --git a/daemon/system-fw.json b/daemon/system-fw.json
new file mode 100644
index 0000000..400f6e2
--- /dev/null
+++ b/daemon/system-fw.json
@@ -0,0 +1,14 @@
+{
+ "SystemRules": [
+ {
+ "Rule": {
+ "Description": "Allow icmp",
+ "Table": "mangle",
+ "Chain": "OUTPUT",
+ "Parameters": "-p icmp",
+ "Target": "ACCEPT",
+ "TargetParameters": ""
+ }
+ }
+ ]
+}
diff --git a/daemon/ui/client.go b/daemon/ui/client.go
new file mode 100644
index 0000000..010a0bb
--- /dev/null
+++ b/daemon/ui/client.go
@@ -0,0 +1,343 @@
+package ui
+
+import (
+ "fmt"
+ "net"
+ "sync"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/conman"
+ "github.com/evilsocket/opensnitch/daemon/firewall/iptables"
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/evilsocket/opensnitch/daemon/rule"
+ "github.com/evilsocket/opensnitch/daemon/statistics"
+ "github.com/evilsocket/opensnitch/daemon/ui/protocol"
+
+ "github.com/fsnotify/fsnotify"
+ "golang.org/x/net/context"
+ "google.golang.org/grpc"
+ "google.golang.org/grpc/connectivity"
+ "google.golang.org/grpc/keepalive"
+)
+
+var (
+ // configFile is the on-disk daemon configuration, watched for changes.
+ configFile = "/etc/opensnitchd/default-config.json"
+ // dummyOperator always matches (OpTrue); used by the client rules below.
+ dummyOperator, _ = rule.NewOperator(rule.Simple, false, rule.OpTrue, "", make([]rule.Operator, 0))
+ // clientDisconnectedRule is applied while no GUI is connected.
+ clientDisconnectedRule = rule.Create("ui.client.disconnected", true, false, rule.Allow, rule.Once, dummyOperator)
+ // While the GUI is connected, deny by default everything until the user takes an action.
+ clientConnectedRule = rule.Create("ui.client.connected", true, false, rule.Deny, rule.Once, dummyOperator)
+ // clientErrorRule is applied when asking the GUI fails.
+ clientErrorRule = rule.Create("ui.client.error", true, false, rule.Allow, rule.Once, dummyOperator)
+ // config holds the values loaded from configFile.
+ config Config
+)
+
+// serverConfig holds the "Server" section of the configuration:
+// the UI service address to connect to and an optional log file.
+type serverConfig struct {
+ Address string `json:"Address"`
+ LogFile string `json:"LogFile"`
+}
+
+// Config holds the values loaded from configFile
+type Config struct {
+ sync.RWMutex
+ Server serverConfig `json:"Server"`
+ DefaultAction string `json:"DefaultAction"`
+ DefaultDuration string `json:"DefaultDuration"`
+ InterceptUnknown bool `json:"InterceptUnknown"`
+ ProcMonitorMethod string `json:"ProcMonitorMethod"`
+ // LogLevel is a pointer so "absent from the file" can be told
+ // apart from an explicit level 0.
+ LogLevel *uint32 `json:"LogLevel"`
+ Firewall string `json:"Firewall"`
+ Stats statistics.StatsConfig `json:"Stats"`
+}
+
+// Client holds the connection information of a client.
+type Client struct {
+ sync.RWMutex
+ // clientCtx/clientCancel bound the lifetime of the background
+ // poller and the notifications stream; Close() cancels them.
+ clientCtx context.Context
+ clientCancel context.CancelFunc
+
+ stats *statistics.Statistics
+ rules *rule.Loader
+ socketPath string
+ isUnixSocket bool
+ con *grpc.ClientConn
+ client protocol.UIClient
+ configWatcher *fsnotify.Watcher
+ streamNotifications protocol.UI_NotificationsClient
+ //isAsking is set to true if the client is awaiting a decision from the GUI
+ isAsking bool
+}
+
+// NewClient creates and configures a new client.
+// It loads the daemon configuration from disk and starts the background
+// poller that keeps (re)connecting to the UI service.
+func NewClient(socketPath string, stats *statistics.Statistics, rules *rule.Loader) *Client {
+ c := &Client{
+ stats: stats,
+ rules: rules,
+ isUnixSocket: false,
+ isAsking: false,
+ }
+ c.clientCtx, c.clientCancel = context.WithCancel(context.Background())
+
+ // NOTE(review): if NewWatcher() fails, configWatcher stays nil and
+ // loadDiskConfiguration() calls Add() on it — verify this cannot panic.
+ if watcher, err := fsnotify.NewWatcher(); err == nil {
+ c.configWatcher = watcher
+ }
+ c.loadDiskConfiguration(false)
+ // An explicit socket path (e.g. from the command line) overrides the
+ // address loaded from the config file.
+ if socketPath != "" {
+ c.setSocketPath(c.getSocketPath(socketPath))
+ }
+
+ go c.poller()
+ return c
+}
+
+// Close cancels the running tasks: pinging the server and (re)connection poller.
+func (c *Client) Close() {
+ // Cancelling clientCtx stops poller() and listenForNotifications().
+ c.clientCancel()
+}
+
+// ProcMonitorMethod returns the monitor method configured.
+// If it's not present in the config file, it'll return an empty string.
+func (c *Client) ProcMonitorMethod() string {
+ // Reads the package-level config, guarded by its own RWMutex.
+ config.RLock()
+ defer config.RUnlock()
+ return config.ProcMonitorMethod
+}
+
+// InterceptUnknown returns the InterceptUnknown config option, i.e.
+// whether connections that could not be attributed to a process should
+// still be intercepted.
+func (c *Client) InterceptUnknown() bool {
+ config.RLock()
+ defer config.RUnlock()
+ return config.InterceptUnknown
+}
+
+// GetStatsConfig returns the stats config from disk
+func (c *Client) GetStatsConfig() statistics.StatsConfig {
+ config.RLock()
+ defer config.RUnlock()
+ // Returned by value: callers get a snapshot of the current config.
+ return config.Stats
+}
+
+// GetFirewallType returns the firewall to use.
+// It defaults to iptables when the option is absent from the config file.
+func (c *Client) GetFirewallType() string {
+ config.RLock()
+ defer config.RUnlock()
+ if config.Firewall == "" {
+ return iptables.Name
+ }
+ return config.Firewall
+}
+
+// DefaultAction returns the default action to apply to a connection:
+// the connected rule's action while the GUI is up (Deny by default),
+// otherwise the disconnected rule's action (configurable via
+// DefaultAction in the config file).
+func (c *Client) DefaultAction() rule.Action {
+ // Check the connection state before taking our own lock, since
+ // Connected() acquires it too.
+ isConnected := c.Connected()
+
+ c.RLock()
+ defer c.RUnlock()
+
+ if isConnected {
+ return clientConnectedRule.Action
+ }
+
+ return clientDisconnectedRule.Action
+}
+
+// DefaultDuration returns the default duration configured for a rule.
+// For example it can be: once, always, "until restart".
+// NOTE(review): always taken from clientDisconnectedRule regardless of
+// the connection state — confirm this is intended.
+func (c *Client) DefaultDuration() rule.Duration {
+ c.RLock()
+ defer c.RUnlock()
+ return clientDisconnectedRule.Duration
+}
+
+// Connected reports whether the gRPC connection with the UI service is
+// established and currently in the Ready state.
+func (c *Client) Connected() bool {
+	c.RLock()
+	defer c.RUnlock()
+	return c.con != nil && c.con.GetState() == connectivity.Ready
+}
+
+// GetIsAsking returns the isAsking flag, i.e. whether the client is
+// currently waiting for the user's decision in the GUI.
+func (c *Client) GetIsAsking() bool {
+ c.RLock()
+ defer c.RUnlock()
+ return c.isAsking
+}
+
+// SetIsAsking sets the isAsking flag.
+func (c *Client) SetIsAsking(flag bool) {
+ c.Lock()
+ defer c.Unlock()
+ c.isAsking = flag
+}
+
+// poller keeps trying to (re)connect to the UI service and, while
+// connected, pings it once per second (the ping also delivers the
+// accumulated statistics). It notifies connect/disconnect transitions
+// via onStatusChange() and exits when the client context is cancelled
+// by Close().
+func (c *Client) poller() {
+	log.Debug("UI service poller started for socket %s", c.socketPath)
+	wasConnected := false
+	for {
+		select {
+		case <-c.clientCtx.Done():
+			log.Info("Client.poller() exit, Done()")
+			goto Exit
+		default:
+			isConnected := c.Connected()
+			if wasConnected != isConnected {
+				c.onStatusChange(isConnected)
+				wasConnected = isConnected
+			}
+
+			// Re-check the state after each step: connect() may have
+			// just brought the connection up.
+			if !c.Connected() {
+				// connect and create the client if needed
+				if err := c.connect(); err != nil {
+					log.Warning("Error while connecting to UI service: %s", err)
+				}
+			}
+			if c.Connected() {
+				// if the client is connected and ready, send a ping
+				if err := c.ping(time.Now()); err != nil {
+					log.Warning("Error while pinging UI service: %s, state: %v", err, c.con.GetState())
+				}
+			}
+
+			time.Sleep(1 * time.Second)
+		}
+	}
+Exit:
+	log.Info("uiClient exit")
+}
+
+// onStatusChange reacts to connect/disconnect transitions detected by
+// poller(): on connect it subscribes to GUI notifications, on
+// disconnect it tears the connection down so it can be re-established.
+func (c *Client) onStatusChange(connected bool) {
+ if connected {
+ log.Info("Connected to the UI service on %s", c.socketPath)
+ go c.Subscribe()
+ } else {
+ log.Error("Connection to the UI service lost.")
+ c.disconnect()
+ }
+}
+
+// connect (re)establishes the gRPC connection with the UI service if it
+// is not already up, recreating it after a transient failure or
+// shutdown, and lazily creates the protobuf client on first success.
+func (c *Client) connect() (err error) {
+ if c.Connected() {
+ return
+ }
+
+ // A stale connection object in a failed state must be torn down
+ // before dialing again; otherwise keep waiting for it to settle.
+ if c.con != nil {
+ if c.con.GetState() == connectivity.TransientFailure || c.con.GetState() == connectivity.Shutdown {
+ c.disconnect()
+ } else {
+ return
+ }
+ }
+
+ if err := c.openSocket(); err != nil {
+ c.disconnect()
+ return err
+ }
+
+ if c.client == nil {
+ c.client = protocol.NewUIClient(c.con)
+ }
+ return nil
+}
+
+// openSocket dials the UI service, either over a unix socket (with a
+// custom dialer) or over TCP with client-side keepalives, storing the
+// resulting connection in c.con.
+func (c *Client) openSocket() (err error) {
+ c.Lock()
+ defer c.Unlock()
+
+ if c.isUnixSocket {
+ c.con, err = grpc.Dial(c.socketPath, grpc.WithInsecure(),
+ grpc.WithDialer(func(addr string, timeout time.Duration) (net.Conn, error) {
+ return net.DialTimeout("unix", addr, timeout)
+ }))
+ } else {
+ // https://pkg.go.dev/google.golang.org/grpc/keepalive#ClientParameters
+ var kacp = keepalive.ClientParameters{
+ Time: 5 * time.Second,
+ // if there's no activity after ^, wait 20s and close
+ // server timeout is 20s by default.
+ Timeout: 22 * time.Second,
+ // send pings even without active streams
+ PermitWithoutStream: true,
+ }
+
+ c.con, err = grpc.Dial(c.socketPath, grpc.WithInsecure(), grpc.WithKeepaliveParams(kacp))
+ }
+
+ return err
+}
+
+// disconnect closes the gRPC connection and clears the protobuf client,
+// so poller() can establish a fresh connection later.
+func (c *Client) disconnect() {
+ c.Lock()
+ defer c.Unlock()
+
+ c.client = nil
+ if c.con != nil {
+ c.con.Close()
+ c.con = nil
+ log.Debug("client.disconnect()")
+ }
+}
+
+// ping sends a keep-alive to the UI service, using ts' UnixNano as the
+// request id, and delivers the accumulated statistics along with it.
+// Note that stats.Serialize() also empties the collected events.
+// It fails if the service is not connected or the pong id mismatches.
+func (c *Client) ping(ts time.Time) (err error) {
+ if c.Connected() == false {
+ return fmt.Errorf("service is not connected")
+ }
+
+ c.Lock()
+ defer c.Unlock()
+
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second)
+ defer cancel()
+ reqID := uint64(ts.UnixNano())
+
+ pReq := &protocol.PingRequest{
+ Id: reqID,
+ Stats: c.stats.Serialize(),
+ }
+ // NOTE(review): the stats read-lock is held across the RPC itself;
+ // Serialize() above already does its own locking — confirm intent.
+ c.stats.RLock()
+ pong, err := c.client.Ping(ctx, pReq)
+ c.stats.RUnlock()
+ if err != nil {
+ return err
+ }
+
+ if pong.Id != reqID {
+ return fmt.Errorf("Expected pong with id 0x%x, got 0x%x", reqID, pong.Id)
+ }
+
+ return nil
+}
+
+// Ask sends a request to the server, with the values of a connection to be
+// allowed or denied.
+// It returns nil on timeout, RPC error or malformed reply, in which
+// case the caller falls back to the default action.
+func (c *Client) Ask(con *conman.Connection) *rule.Rule {
+ if c.client == nil {
+ return nil
+ }
+
+ // FIXME: if timeout is fired, the rule is not added to the list in the GUI
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*120)
+ defer cancel()
+ reply, err := c.client.AskRule(ctx, con.Serialize())
+ if err != nil {
+ log.Warning("Error while asking for rule: %s - %v", err, con)
+ return nil
+ }
+
+ r, err := rule.Deserialize(reply)
+ if err != nil {
+ // NOTE(review): the deserialization error is silently swallowed
+ // here; consider logging it.
+ return nil
+ }
+ return r
+}
+
+// monitorConfigWorker reloads the configuration from disk whenever the
+// config file is written to or removed.
+func (c *Client) monitorConfigWorker() {
+	for {
+		// Detect a closed Events channel (watcher shut down); the
+		// previous single-case select would busy-spin on zero values.
+		event, ok := <-c.configWatcher.Events
+		if !ok {
+			return
+		}
+		if (event.Op&fsnotify.Write == fsnotify.Write) || (event.Op&fsnotify.Remove == fsnotify.Remove) {
+			c.loadDiskConfiguration(true)
+		}
+	}
+}
diff --git a/daemon/ui/config.go b/daemon/ui/config.go
new file mode 100644
index 0000000..a27bb46
--- /dev/null
+++ b/daemon/ui/config.go
@@ -0,0 +1,118 @@
+package ui
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "strings"
+
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/evilsocket/opensnitch/daemon/procmon/monitor"
+ "github.com/evilsocket/opensnitch/daemon/rule"
+)
+
+func (c *Client) getSocketPath(socketPath string) string {
+ c.Lock()
+ defer c.Unlock()
+
+ if strings.HasPrefix(socketPath, "unix://") == true {
+ c.isUnixSocket = true
+ return socketPath[7:]
+ }
+
+ c.isUnixSocket = false
+ return socketPath
+}
+
+// setSocketPath stores the (already normalized) socket path on the client.
+func (c *Client) setSocketPath(socketPath string) {
+ c.Lock()
+ defer c.Unlock()
+
+ c.socketPath = socketPath
+}
+
+// isProcMonitorEqual reports whether the given monitor method matches
+// the one currently configured.
+func (c *Client) isProcMonitorEqual(newMonitorMethod string) bool {
+ config.RLock()
+ defer config.RUnlock()
+
+ return newMonitorMethod == config.ProcMonitorMethod
+}
+
+// parseConf unmarshals a raw JSON configuration into a standalone
+// Config value, without touching the package-level config.
+func (c *Client) parseConf(rawConfig string) (conf Config, err error) {
+ err = json.Unmarshal([]byte(rawConfig), &conf)
+ return conf, err
+}
+
+// loadDiskConfiguration reads and applies the daemon configuration from
+// configFile. On the initial call (reload == false) it also starts the
+// goroutine that watches the file for changes.
+func (c *Client) loadDiskConfiguration(reload bool) {
+	raw, err := ioutil.ReadFile(configFile)
+	if err != nil {
+		// The error was previously built with fmt.Errorf and discarded
+		// (go vet: unused result); log it instead.
+		log.Error("Error loading disk configuration %s: %s", configFile, err)
+	}
+
+	if ok := c.loadConfiguration(raw); ok {
+		// Guard against a nil watcher: fsnotify.NewWatcher() may have
+		// failed in NewClient().
+		if c.configWatcher == nil {
+			log.Error("Could not watch path: config watcher not initialized")
+			return
+		}
+		if err := c.configWatcher.Add(configFile); err != nil {
+			log.Error("Could not watch path: %s", err)
+			return
+		}
+	}
+
+	if reload {
+		return
+	}
+
+	go c.monitorConfigWorker()
+}
+
+// loadConfiguration parses and applies a raw JSON configuration: log
+// level and file, server address (disconnecting so the poller reconnects
+// if it changed), default action/duration for the client rules, and the
+// process monitor method. It returns false if the JSON is invalid.
+func (c *Client) loadConfiguration(rawConfig []byte) bool {
+ config.Lock()
+ defer config.Unlock()
+
+ if err := json.Unmarshal(rawConfig, &config); err != nil {
+ log.Error("Error parsing configuration %s: %s", configFile, err)
+ return false
+ }
+ // firstly load config level, to detect further errors if any
+ if config.LogLevel != nil {
+ log.SetLogLevel(int(*config.LogLevel))
+ }
+ if config.Server.LogFile != "" {
+ log.Close()
+ log.OpenFile(config.Server.LogFile)
+ }
+
+ if config.Server.Address != "" {
+ tempSocketPath := c.getSocketPath(config.Server.Address)
+ if tempSocketPath != c.socketPath {
+ // disconnect, and let the connection poller reconnect to the new address
+ c.disconnect()
+ }
+ c.setSocketPath(tempSocketPath)
+ }
+ // Only the disconnected/error rules are updated here; the
+ // connected-rule action is received from the GUI on Subscribe().
+ if config.DefaultAction != "" {
+ clientDisconnectedRule.Action = rule.Action(config.DefaultAction)
+ clientErrorRule.Action = rule.Action(config.DefaultAction)
+ }
+ if config.DefaultDuration != "" {
+ clientDisconnectedRule.Duration = rule.Duration(config.DefaultDuration)
+ clientErrorRule.Duration = rule.Duration(config.DefaultDuration)
+ }
+ if config.ProcMonitorMethod != "" {
+ if err := monitor.ReconfigureMonitorMethod(config.ProcMonitorMethod); err != nil {
+ log.Warning("Unable to set new process monitor method from disk: %v", err)
+ }
+ }
+
+ return true
+}
+
+// saveConfiguration applies the given raw JSON configuration and, when
+// it is valid, persists it to configFile (which in turn triggers a
+// reload through the config watcher).
+func (c *Client) saveConfiguration(rawConfig string) (err error) {
+	if !c.loadConfiguration([]byte(rawConfig)) {
+		// The previous version interpolated err here, but err was
+		// always nil at this point; the real parse error is logged by
+		// loadConfiguration().
+		return fmt.Errorf("Error parsing configuration %s", rawConfig)
+	}
+
+	if err = ioutil.WriteFile(configFile, []byte(rawConfig), 0644); err != nil {
+		log.Error("writing configuration to disk: %s", err)
+		return err
+	}
+	return nil
+}
diff --git a/daemon/ui/notifications.go b/daemon/ui/notifications.go
new file mode 100644
index 0000000..27fd929
--- /dev/null
+++ b/daemon/ui/notifications.go
@@ -0,0 +1,304 @@
+package ui
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/evilsocket/opensnitch/daemon/core"
+ "github.com/evilsocket/opensnitch/daemon/firewall"
+ "github.com/evilsocket/opensnitch/daemon/log"
+ "github.com/evilsocket/opensnitch/daemon/procmon"
+ "github.com/evilsocket/opensnitch/daemon/procmon/monitor"
+ "github.com/evilsocket/opensnitch/daemon/rule"
+ "github.com/evilsocket/opensnitch/daemon/ui/protocol"
+ "golang.org/x/net/context"
+)
+
+// stopMonitoringProcess receives the PID whose periodic monitoring
+// (monitorProcessDetails) should stop. Unbuffered: sends block until a
+// monitor goroutine receives.
+var stopMonitoringProcess = make(chan int)
+
+// NewReply constructs a new protocol notification reply with the given
+// id, result code and payload.
+func NewReply(rID uint64, replyCode protocol.NotificationReplyCode, data string) *protocol.NotificationReply {
+ return &protocol.NotificationReply{
+ Id: rID,
+ Code: replyCode,
+ Data: data,
+ }
+}
+
+// getClientConfig builds the hello message sent to the GUI on
+// Subscribe(): node identity, raw on-disk configuration, firewall state
+// and the currently loaded rules.
+func (c *Client) getClientConfig() *protocol.ClientConfig {
+ // NOTE(review): read error is ignored; raw may be empty if the file
+ // is unreadable.
+ raw, _ := ioutil.ReadFile(configFile)
+ nodeName := core.GetHostname()
+ nodeVersion := core.GetKernelVersion()
+ // NOTE(review): ts is never assigned, so Id is always the zero-time
+ // UnixNano value — looks like time.Now() was intended; confirm.
+ var ts time.Time
+ rulesTotal := len(c.rules.GetAll())
+ ruleList := make([]*protocol.Rule, rulesTotal)
+ idx := 0
+ for _, r := range c.rules.GetAll() {
+ ruleList[idx] = r.Serialize()
+ idx++
+ }
+ return &protocol.ClientConfig{
+ Id: uint64(ts.UnixNano()),
+ Name: nodeName,
+ Version: nodeVersion,
+ IsFirewallRunning: firewall.IsRunning(),
+ Config: strings.Replace(string(raw), "\n", "", -1),
+ LogLevel: uint32(log.MinLevel),
+ Rules: ruleList,
+ }
+}
+
+// monitorProcessDetails periodically (every 2s) gathers details of pid
+// and streams them to the GUI, until the process dies, the stream
+// errors, or this pid arrives on stopMonitoringProcess.
+func (c *Client) monitorProcessDetails(pid int, stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
+ p := procmon.NewProcess(pid, "")
+ ticker := time.NewTicker(2 * time.Second)
+
+ for {
+ select {
+ case _pid := <-stopMonitoringProcess:
+ // A stop request for a different pid is not for us; keep going.
+ if _pid != pid {
+ continue
+ }
+ goto Exit
+ case <-ticker.C:
+ // GetInfo failing usually means the process exited; report and stop.
+ if err := p.GetInfo(); err != nil {
+ c.sendNotificationReply(stream, notification.Id, notification.Data, err)
+ goto Exit
+ }
+
+ // A Marshal error is forwarded to the GUI via the reply below.
+ pJSON, err := json.Marshal(p)
+ notification.Data = string(pJSON)
+ if errs := c.sendNotificationReply(stream, notification.Id, notification.Data, err); errs != nil {
+ goto Exit
+ }
+ }
+ }
+
+Exit:
+ ticker.Stop()
+}
+
+// handleActionChangeConfig validates and applies a configuration pushed
+// from the GUI: it parses it first, switches the process monitor method,
+// then saves the config to disk (which triggers a reload), replying with
+// the outcome at each failure point.
+func (c *Client) handleActionChangeConfig(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
+ log.Info("[notification] Reloading configuration")
+ // Parse received configuration first, to get the new proc monitor method.
+ newConf, err := c.parseConf(notification.Data)
+ if err != nil {
+ log.Warning("[notification] error parsing received config: %v", notification.Data)
+ c.sendNotificationReply(stream, notification.Id, "", err)
+ return
+ }
+
+ if err := monitor.ReconfigureMonitorMethod(newConf.ProcMonitorMethod); err != nil {
+ c.sendNotificationReply(stream, notification.Id, "", err)
+ return
+ }
+
+ // this save operation triggers a re-loadConfiguration()
+ err = c.saveConfiguration(notification.Data)
+ if err != nil {
+ log.Warning("[notification] CHANGE_CONFIG not applied %s", err)
+ }
+
+ c.sendNotificationReply(stream, notification.Id, "", err)
+}
+
+// handleActionEnableRule enables the rules received in the notification
+// and reports the last error (if any) back to the GUI.
+func (c *Client) handleActionEnableRule(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
+	var err error
+	for _, rul := range notification.Rules {
+		log.Info("[notification] enable rule: %s", rul.Name)
+		// protocol.Rule(protobuf) != rule.Rule(json)
+		r, dErr := rule.Deserialize(rul)
+		if r == nil {
+			// Previously the error was discarded and r.Enabled would
+			// panic on a nil rule.
+			err = dErr
+			continue
+		}
+		r.Enabled = true
+		// save to disk only if the duration is rule.Always
+		err = c.rules.Replace(r, r.Duration == rule.Always)
+	}
+	c.sendNotificationReply(stream, notification.Id, "", err)
+}
+
+// handleActionDisableRule disables the rules received in the
+// notification and reports the last error (if any) back to the GUI.
+func (c *Client) handleActionDisableRule(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
+	var err error
+	for _, rul := range notification.Rules {
+		log.Info("[notification] disable rule: %s", rul)
+		r, dErr := rule.Deserialize(rul)
+		if r == nil {
+			// Previously the error was discarded and r.Enabled would
+			// panic on a nil rule.
+			err = dErr
+			continue
+		}
+		r.Enabled = false
+		err = c.rules.Replace(r, r.Duration == rule.Always)
+	}
+	c.sendNotificationReply(stream, notification.Id, "", err)
+}
+
+// handleActionChangeRule adds or replaces the rules received from the
+// GUI, reporting the last error (if any) back.
+func (c *Client) handleActionChangeRule(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
+ var rErr error
+ for _, rul := range notification.Rules {
+ r, err := rule.Deserialize(rul)
+ if r == nil {
+ rErr = fmt.Errorf("Invalid rule, %s", err)
+ continue
+ }
+ log.Info("[notification] change rule: %s %d", r, notification.Id)
+ if err := c.rules.Replace(r, r.Duration == rule.Always); err != nil {
+ log.Warning("[notification] Error changing rule: %s %s", err, r)
+ rErr = err
+ }
+ }
+ c.sendNotificationReply(stream, notification.Id, "", rErr)
+}
+
+// handleActionDeleteRule deletes, by name, the rules received from the
+// GUI and reports the last error (if any) back.
+func (c *Client) handleActionDeleteRule(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
+ var err error
+ for _, rul := range notification.Rules {
+ log.Info("[notification] delete rule: %s %d", rul.Name, notification.Id)
+ err = c.rules.Delete(rul.Name)
+ if err != nil {
+ log.Error("[notification] Error deleting rule: %s %s", err, rul)
+ }
+ }
+ c.sendNotificationReply(stream, notification.Id, "", err)
+}
+
+// handleActionMonitorProcess starts streaming details of the given PID
+// to the GUI, after checking that /proc/<pid> exists.
+func (c *Client) handleActionMonitorProcess(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
+ pid, err := strconv.Atoi(notification.Data)
+ if err != nil {
+ log.Error("parsing PID to monitor: %d, err: %s", pid, err)
+ return
+ }
+ if !core.Exists(fmt.Sprint("/proc/", pid)) {
+ c.sendNotificationReply(stream, notification.Id, "", fmt.Errorf("The process is no longer running"))
+ return
+ }
+ go c.monitorProcessDetails(pid, stream, notification)
+}
+
+// handleActionStopMonitorProcess stops a running process monitor for
+// the given PID.
+// NOTE(review): the send on stopMonitoringProcess is unbuffered and
+// blocks until monitorProcessDetails() receives it — if no monitor is
+// running for this pid, this handler blocks; verify.
+func (c *Client) handleActionStopMonitorProcess(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
+ pid, err := strconv.Atoi(notification.Data)
+ if err != nil {
+ log.Error("parsing PID to stop monitor: %d, err: %s", pid, err)
+ c.sendNotificationReply(stream, notification.Id, "", fmt.Errorf("Error stopping monitor: %s", notification.Data))
+ return
+ }
+ stopMonitoringProcess <- pid
+ c.sendNotificationReply(stream, notification.Id, "", nil)
+}
+
+// handleNotification dispatches a notification received from the GUI to
+// the corresponding action handler. Notifications of unknown type are
+// silently ignored, as before.
+func (c *Client) handleNotification(stream protocol.UI_NotificationsClient, notification *protocol.Notification) {
+	// Idiomatic tag switch instead of a boolean-case switch on the
+	// same expression.
+	switch notification.Type {
+	case protocol.Action_MONITOR_PROCESS:
+		c.handleActionMonitorProcess(stream, notification)
+
+	case protocol.Action_STOP_MONITOR_PROCESS:
+		c.handleActionStopMonitorProcess(stream, notification)
+
+	case protocol.Action_CHANGE_CONFIG:
+		c.handleActionChangeConfig(stream, notification)
+
+	case protocol.Action_LOAD_FIREWALL:
+		log.Info("[notification] starting firewall")
+		firewall.Init(c.GetFirewallType(), nil)
+		c.sendNotificationReply(stream, notification.Id, "", nil)
+
+	case protocol.Action_UNLOAD_FIREWALL:
+		log.Info("[notification] stopping firewall")
+		firewall.Stop()
+		c.sendNotificationReply(stream, notification.Id, "", nil)
+
+	// ENABLE_RULE just replaces the rule on disk
+	case protocol.Action_ENABLE_RULE:
+		c.handleActionEnableRule(stream, notification)
+
+	case protocol.Action_DISABLE_RULE:
+		c.handleActionDisableRule(stream, notification)
+
+	case protocol.Action_DELETE_RULE:
+		c.handleActionDeleteRule(stream, notification)
+
+	// CHANGE_RULE can add() or replace) an existing rule.
+	case protocol.Action_CHANGE_RULE:
+		c.handleActionChangeRule(stream, notification)
+	}
+}
+
+// sendNotificationReply sends an OK reply carrying data for
+// notification nID, or an ERROR reply carrying err's message when err
+// is non-nil. It returns the stream error, if any.
+func (c *Client) sendNotificationReply(stream protocol.UI_NotificationsClient, nID uint64, data string, err error) error {
+ reply := NewReply(nID, protocol.NotificationReplyCode_OK, data)
+ if err != nil {
+ reply.Code = protocol.NotificationReplyCode_ERROR
+ reply.Data = fmt.Sprint(err)
+ }
+ if err := stream.Send(reply); err != nil {
+ log.Error("Error replying to notification: %s %d", err, reply.Id)
+ return err
+ }
+
+ return nil
+}
+
+// Subscribe opens a connection with the server (UI), to start
+// receiving notifications.
+// It firstly sends the daemon status and configuration.
+func (c *Client) Subscribe() {
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*10)
+ defer cancel()
+
+ clientCfg, err := c.client.Subscribe(ctx, c.getClientConfig())
+ if err != nil {
+ log.Error("Subscribing to GUI %s", err)
+ // When connecting to the GUI via TCP, sometimes the notifications channel is
+ // not established, and the main channel is never closed.
+ // We need to disconnect everything after a timeout and try it again.
+ c.disconnect()
+ return
+ }
+
+ // The GUI replies with its own config; adopt its default action for
+ // the connected rule.
+ // NOTE(review): this takes the Client's lock while mutating the
+ // package-level clientConnectedRule — confirm the intended guard.
+ if tempConf, err := c.parseConf(clientCfg.Config); err == nil {
+ c.Lock()
+ clientConnectedRule.Action = rule.Action(tempConf.DefaultAction)
+ c.Unlock()
+ }
+ c.listenForNotifications()
+}
+
+// Notifications is the channel where the daemon receives messages from the server.
+// It consists of 2 grpc streams (send/receive) that are never closed,
+// this way we can share messages in realtime.
+// If the GUI is closed, we'll receive an error reading from the channel.
+func (c *Client) listenForNotifications() {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+
+ // open the stream channel
+ streamReply := &protocol.NotificationReply{Id: 0, Code: protocol.NotificationReplyCode_OK}
+ notisStream, err := c.client.Notifications(ctx)
+ if err != nil {
+ log.Error("establishing notifications channel %s", err)
+ return
+ }
+ // send the first notification
+ if err := notisStream.Send(streamReply); err != nil {
+ log.Error("sending notification HELLO %s", err)
+ return
+ }
+ log.Info("Start receiving notifications")
+ // Loop until the client context is cancelled or the stream breaks;
+ // Recv() blocks, so cancellation is only observed between messages.
+ for {
+ select {
+ case <-c.clientCtx.Done():
+ goto Exit
+ default:
+ noti, err := notisStream.Recv()
+ if err == io.EOF {
+ log.Warning("notification channel closed by the server")
+ goto Exit
+ }
+ if err != nil {
+ log.Error("getting notifications: %s %s", err, noti)
+ goto Exit
+ }
+ c.handleNotification(notisStream, noti)
+ }
+ }
+Exit:
+ notisStream.CloseSend()
+ log.Info("Stop receiving notifications")
+ c.disconnect()
+}
diff --git a/daemon/ui/protocol/.gitkeep b/daemon/ui/protocol/.gitkeep
new file mode 100644
index 0000000..e69de29
diff --git a/debian/changelog b/debian/changelog
new file mode 100644
index 0000000..d231c03
--- /dev/null
+++ b/debian/changelog
@@ -0,0 +1,233 @@
+opensnitch (1.5.5-1) unstable; urgency=medium
+
+ * New upstream release.
+ * Bump Standards-Version to 4.6.2.
+ * Upload sponsored by Petter Reinholdtsen.
+
+ -- Gustavo Iñiguez Goya Wed, 01 Feb 2023 22:37:12 +0100
+
+opensnitch (1.5.4-1) unstable; urgency=high
+
+ * New upstream release. (Closes: #1030115)
+ * debian/control:
+ - Updated packages description.
+ - Removed debconf and whiptail|dialog dependencies.
+ - Added xdg-user-dirs, gtk-update-icon-cache dependencies.
+ - Point Vcs-Git field to the 1.5.0 branch.
+ * debian/postinst:
+ - Fixed opensnitch_ui.desktop installation.
+ - Fixed updating icons cache.
+ * debian/postrm:
+ - Fixed removing opensnitch_ui.desktop
+ * debian/tests/:
+ - Added autopkgtests.
+ * Upload sponsored by Petter Reinholdtsen.
+
+ -- Gustavo Iñiguez Goya Tue, 31 Jan 2023 23:48:58 +0100
+
+opensnitch (1.5.3-1) unstable; urgency=medium
+
+ * Added debian/upstream/metadata.
+ * Updated Homepage url.
+ * Updated Copyright years.
+
+ -- Gustavo-Iniguez-Goya Sun, 22 Jan 2023 21:30:45 +0100
+
+opensnitch (1.5.2.1-1) unstable; urgency=medium
+
+ * Initial release. (Closes: #909567)
+
+ -- Gustavo-Iniguez-Goya Fri, 20 Jan 2023 22:26:40 +0000
+
+opensnitch (1.5.2-1) unstable; urgency=medium
+
+ * try to mount debugfs on boot up
+
+ -- gustavo-iniguez-goya Wed, 27 Jul 2022 17:29:33 +0200
+
+opensnitch (1.5.1-1) unstable; urgency=medium
+
+ * Better eBPF cache.
+ * Fixed error resolving domains to localhost.
+ * Fixed error deleting our nftables rules.
+
+ -- gustavo-iniguez-goya Fri, 25 Feb 2022 01:21:38 +0100
+
+opensnitch (1.5.0-1) unstable; urgency=medium
+
+ * New release.
+ * Added Reject option.
+ * New lists types to block ads/malware/...
+ * Better connections interception.
+ * Better VPNs handling.
+ * Bug fixes.
+
+ -- gustavo-iniguez-goya Fri, 28 Jan 2022 23:20:38 +0100
+
+opensnitch (1.5.0~rc2-1) unstable; urgency=medium
+
+ * Better connections interception.
+ * Improvements.
+
+ -- gustavo-iniguez-goya Sun, 16 Jan 2022 23:15:12 +0100
+
+opensnitch (1.5.0~rc1-1) unstable; urgency=medium
+
+ * New features.
+
+ -- gustavo-iniguez-goya Thu, 07 Oct 2021 14:57:35 +0200
+
+opensnitch (1.4.0-1) unstable; urgency=medium
+
+ * final release.
+
+ -- gustavo-iniguez-goya Fri, 27 Aug 2021 13:33:07 +0200
+
+opensnitch (1.4.0~rc4-1) unstable; urgency=medium
+
+ * Bug fix release.
+
+ -- gustavo-iniguez-goya Wed, 11 Aug 2021 15:17:49 +0200
+
+opensnitch (1.4.0~rc3-1) unstable; urgency=medium
+
+ * Bug fix release.
+
+ -- gustavo-iniguez-goya Fri, 16 Jul 2021 23:28:52 +0200
+
+opensnitch (1.4.0~rc2-1) unstable; urgency=medium
+
+ * Added eBPF support.
+ * Fixes and improvements.
+
+ -- gustavo-iniguez-goya Fri, 07 May 2021 01:08:02 +0200
+
+opensnitch (1.4.0~rc-1) unstable; urgency=medium
+
+ * Bug fix and improvements release.
+
+ -- gustavo-iniguez-goya Thu, 25 Mar 2021 01:02:31 +0100
+
+opensnitch (1.3.6-1) unstable; urgency=medium
+
+ * Bug fix and improvements release.
+
+ -- gustavo-iniguez-goya Wed, 10 Feb 2021 10:17:43 +0100
+
+opensnitch (1.3.5-1) unstable; urgency=medium
+
+ * Bug fix and improvements release.
+
+ -- gustavo-iniguez-goya Mon, 11 Jan 2021 18:01:53 +0100
+
+opensnitch (1.3.0-1) unstable; urgency=medium
+
+ * Fixed how we check rules
+ * Fixed cpu spike after disable interception.
+ * Fixed cleaning up fw rules on exit.
+ * make regexp rules case-insensitive by default
+ * allow to filter by dst network.
+
+ -- gustavo-iniguez-goya Wed, 16 Dec 2020 01:15:03 +0100
+
+opensnitch (1.3.0~rc-1) unstable; urgency=medium
+
+ * Non-maintainer upload.
+
+ -- gustavo-iniguez-goya Fri, 13 Nov 2020 00:51:34 +0100
+
+opensnitch (1.2.0-1) unstable; urgency=medium
+
+ * Fixed memleaks.
+ * Sort rules by name
+ * Added priority field to rules.
+ * Other fixes
+
+ -- gustavo-iniguez-goya Mon, 09 Nov 2020 22:55:13 +0100
+
+opensnitch (1.0.1-1) unstable; urgency=medium
+
+ * Fixed app exit when IPv6 is not supported.
+ * Other fixes.
+
+ -- gustavo-iniguez-goya Thu, 30 Jul 2020 21:56:20 +0200
+
+opensnitch (1.0.0-1) unstable; urgency=medium
+
+ * v1.0.0 released.
+
+ -- gustavo-iniguez-goya Thu, 16 Jul 2020 00:19:26 +0200
+
+opensnitch (1.0.0rc11-1) unstable; urgency=medium
+
+ * Fixed multiple race conditions.
+ * Fixed CWD parsing when using audit proc monitor method.
+
+ -- gustavo-iniguez-goya Wed, 24 Jun 2020 00:10:38 +0200
+
+opensnitch (1.0.0rc10-1) unstable; urgency=medium
+
+ * Fixed checking UID functions availability.
+ * Improved process path parsing.
+ * Fixed applying config from the UI.
+ * Fixed default log level.
+ * Gather CWD and process environment vars.
+ * Increase default timeout when asking for a rule.
+
+ -- gustavo-iniguez-goya Sat, 13 Jun 2020 18:45:02 +0200
+
+opensnitch (1.0.0rc9-1) unstable; urgency=medium
+
+ * Ignore malformed rules from loading.
+ * Allow to modify and add rules from the UI.
+
+ -- gustavo-iniguez-goya Sun, 17 May 2020 18:18:24 +0200
+
+opensnitch (1.0.0rc8) unstable; urgency=medium
+
+ * Allow to change settings from the UI.
+ * Improved connection handling with the UI.
+
+ -- gustavo-iniguez-goya Wed, 29 Apr 2020 21:52:27 +0200
+
+opensnitch (1.0.0rc7-1) unstable; urgency=medium
+
+ * Stability, performance and reliability improvements.
+
+ -- gustavo-iniguez-goya Sun, 12 Apr 2020 23:25:41 +0200
+
+opensnitch (1.0.0rc6-1) unstable; urgency=medium
+
+ * Fixed iptables rules deletion.
+ * Improved PIDs cache.
+ * Added audit process monitoring method.
+ * Added logrotate file.
+ * Added default configuration file.
+
+ -- gustavo-iniguez-goya Sun, 08 Mar 2020 20:47:58 +0100
+
+opensnitch (1.0.0rc-5) unstable; urgency=medium
+
+ * Fixed netlink socket querying.
+ * Added check to reload firewall rules if missing.
+
+ -- gustavo-iniguez-goya Mon, 24 Feb 2020 19:55:06 +0100
+
+opensnitch (1.0.0rc-3) unstable; urgency=medium
+
+ * @see: https://github.com/gustavo-iniguez-goya/opensnitch/releases
+
+ -- gustavo-iniguez-goya Tue, 18 Feb 2020 10:09:45 +0100
+
+opensnitch (1.0.0rc-2) unstable; urgency=medium
+
+ * UI minor changes
+ * Expand deb package compatibility.
+
+ -- gustavo-iniguez-goya Wed, 05 Feb 2020 21:50:20 +0100
+
+opensnitch (1.0.0rc-1) unstable; urgency=medium
+
+ * Initial release
+
+ -- gustavo-iniguez-goya Fri, 22 Nov 2019 01:14:08 +0100
diff --git a/debian/control b/debian/control
new file mode 100644
index 0000000..f67967b
--- /dev/null
+++ b/debian/control
@@ -0,0 +1,95 @@
+Source: opensnitch
+Maintainer: Gustavo Iñiguez Goya
+Section: devel
+Testsuite: autopkgtest-pkg-go
+Priority: optional
+Build-Depends:
+ debhelper-compat (= 11),
+ dh-golang,
+ dh-python,
+ golang-any,
+ golang-github-evilsocket-ftrace-dev,
+ golang-github-fsnotify-fsnotify-dev,
+ golang-github-google-gopacket-dev,
+ golang-github-google-nftables-dev,
+ golang-github-iovisor-gobpf-dev,
+ golang-github-vishvananda-netlink-dev,
+ golang-golang-x-net-dev,
+ golang-google-grpc-dev,
+ golang-goprotobuf-dev,
+ libmnl-dev,
+ libnetfilter-queue-dev,
+ pkg-config,
+ protoc-gen-go-grpc,
+ pyqt5-dev-tools,
+ qttools5-dev-tools,
+ python3-all,
+ python3-grpc-tools,
+ python3-setuptools
+Standards-Version: 4.6.2
+Vcs-Browser: https://github.com/evilsocket/opensnitch
+Vcs-Git: https://github.com/evilsocket/opensnitch.git -b 1.5.0
+Homepage: https://github.com/evilsocket/opensnitch
+Rules-Requires-Root: no
+XS-Go-Import-Path: github.com/evilsocket/opensnitch
+
+Package: opensnitch
+Section: net
+Architecture: any
+Depends:
+ ${misc:Depends},
+ ${shlibs:Depends},
+Recommends: python3-opensnitch-ui
+Built-Using: ${misc:Built-Using}
+Description: GNU/Linux interactive application firewall
+ OpenSnitch is a GNU/Linux firewall application.
+ Whenever a program makes a connection, it'll prompt the user to allow or deny
+ it.
+ .
+ The user can decide whether to block the outgoing connection based on properties of
+ the connection: by port, by uid, by dst ip, by program or a combination
+ of them.
+ .
+ These rules can last forever, until the app restarts, or just one time.
+ .
+ The GUI allows the user to view live outgoing connections, as well as search
+ by process, user, host or port.
+ .
+ OpenSnitch can also work as a system-wide domains blocker, by using lists
+ of domains, list of IPs or list of regular expressions.
+
+
+Package: python3-opensnitch-ui
+Architecture: all
+Section: net
+Depends:
+ ${misc:Depends},
+ ${shlibs:Depends},
+ libqt5sql5-sqlite,
+ python3-grpcio,
+ python3-notify2,
+ python3-pyinotify,
+ python3-pyqt5,
+ python3-pyqt5.qtsql,
+ python3-setuptools,
+ python3-six,
+ python3-slugify,
+ python3:any,
+ xdg-user-dirs,
+ gtk-update-icon-cache
+Recommends:
+ python3-pyasn
+Suggests: opensnitch
+Description: GNU/Linux interactive application firewall GUI
+ opensnitch-ui is a GUI for opensnitch written in Python.
+ It allows the user to view live outgoing connections, as well as search
+ for details of the intercepted connections.
+ .
+ The user can decide whether to block outgoing connections based on properties of
+ the connection: by port, by uid, by dst ip, by program or a combination
+ of them.
+ .
+ These rules can last forever, until the daemon restarts, or just one time.
+ .
+ OpenSnitch can also work as a system-wide domains blocker, by using lists
+ of domains, list of IPs or list of regular expressions.
diff --git a/debian/copyright b/debian/copyright
new file mode 100644
index 0000000..a867271
--- /dev/null
+++ b/debian/copyright
@@ -0,0 +1,32 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Source: https://github.com/evilsocket/opensnitch
+Upstream-Contact: Gustavo Iñiguez Goia
+Upstream-Name: opensnitch
+Files-Excluded:
+ Godeps/_workspace
+
+Files: *
+Copyright:
+ 2017-2018 evilsocket
+ 2019-2023 Gustavo Iñiguez Goia
+Comment: Debian packaging is licensed under the same terms as upstream
+License: GPL-3.0+
+ This program is free software; you can redistribute it
+ and/or modify it under the terms of the GNU General Public
+ License as published by the Free Software Foundation; either
+ version 3 of the License, or (at your option) any later
+ version.
+ .
+ This program is distributed in the hope that it will be
+ useful, but WITHOUT ANY WARRANTY; without even the implied
+ warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
+ PURPOSE. See the GNU General Public License for more
+ details.
+ .
+ You should have received a copy of the GNU General Public
+ License along with this program. If not, see
+ http://www.gnu.org/licenses/.
+ .
+ On Debian systems, the full text of the GNU General Public
+ License version 3 can be found in the file
+ '/usr/share/common-licenses/GPL-3'.
diff --git a/debian/gbp.conf b/debian/gbp.conf
new file mode 100644
index 0000000..cec628c
--- /dev/null
+++ b/debian/gbp.conf
@@ -0,0 +1,2 @@
+[DEFAULT]
+pristine-tar = True
diff --git a/debian/gitlab-ci.yml b/debian/gitlab-ci.yml
new file mode 100644
index 0000000..91ff7ea
--- /dev/null
+++ b/debian/gitlab-ci.yml
@@ -0,0 +1,27 @@
+# auto-generated, DO NOT MODIFY.
+# The authoritative copy of this file lives at:
+# https://salsa.debian.org/go-team/ci/blob/master/config/gitlabciyml.go
+
+# TODO: publish under debian-go-team/ci
+image: stapelberg/ci2
+
+test_the_archive:
+ artifacts:
+ paths:
+ - before-applying-commit.json
+ - after-applying-commit.json
+ script:
+ # Create an overlay to discard writes to /srv/gopath/src after the build:
+ - "rm -rf /cache/overlay/{upper,work}"
+ - "mkdir -p /cache/overlay/{upper,work}"
+ - "mount -t overlay overlay -o lowerdir=/srv/gopath/src,upperdir=/cache/overlay/upper,workdir=/cache/overlay/work /srv/gopath/src"
+ - "export GOPATH=/srv/gopath"
+ - "export GOCACHE=/cache/go"
+ # Build the world as-is:
+ - "ci-build -exemptions=/var/lib/ci-build/exemptions.json > before-applying-commit.json"
+ # Copy this package into the overlay:
+ - "GBP_CONF_FILES=:debian/gbp.conf gbp buildpackage --git-no-pristine-tar --git-ignore-branch --git-ignore-new --git-export-dir=/tmp/export --git-no-overlay --git-tarball-dir=/nonexistant --git-cleaner=/bin/true --git-builder='dpkg-buildpackage -S -d --no-sign'"
+ - "pgt-gopath -dsc /tmp/export/*.dsc"
+ # Rebuild the world:
+ - "ci-build -exemptions=/var/lib/ci-build/exemptions.json > after-applying-commit.json"
+ - "ci-diff before-applying-commit.json after-applying-commit.json"
diff --git a/debian/opensnitch.init b/debian/opensnitch.init
new file mode 100644
index 0000000..77ce353
--- /dev/null
+++ b/debian/opensnitch.init
@@ -0,0 +1,78 @@
+#!/bin/sh
+
+### BEGIN INIT INFO
+# Provides: opensnitchd
+# Required-Start: $network $local_fs
+# Required-Stop: $network $local_fs
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: opensnitchd daemon
+# Description: opensnitch application firewall
+### END INIT INFO
+
+NAME=opensnitchd
+PIDDIR=/var/run/$NAME
+OPENSNITCHDPID=$PIDDIR/$NAME.pid
+
+# clear conflicting settings from the environment
+unset TMPDIR
+
+test -x /usr/bin/$NAME || exit 0
+
+. /lib/lsb/init-functions
+
+case $1 in
+ start)
+ log_daemon_msg "Starting opensnitch daemon" $NAME
+ if [ ! -d /etc/$NAME/rules ]; then
+ mkdir -p /etc/$NAME/rules &>/dev/null
+ fi
+
+ # Make sure we have our PIDDIR, even if it's on a tmpfs
+ install -o root -g root -m 755 -d $PIDDIR
+
+ if ! start-stop-daemon --start --quiet --oknodo --pidfile $OPENSNITCHDPID --background --exec /usr/bin/$NAME -- -rules-path /etc/$NAME/rules; then
+ log_end_msg 1
+ exit 1
+ fi
+
+ log_end_msg 0
+ ;;
+ stop)
+
+ log_daemon_msg "Stopping $NAME daemon" $NAME
+
+ start-stop-daemon --stop --quiet --signal QUIT --name $NAME
+ # Wait a little and remove stale PID file
+ sleep 1
+ if [ -f $OPENSNITCHDPID ] && ! ps h `cat $OPENSNITCHDPID` > /dev/null
+ then
+ rm -f $OPENSNITCHDPID
+ fi
+
+ log_end_msg 0
+
+ ;;
+ reload)
+ log_daemon_msg "Reloading $NAME" $NAME
+
+ start-stop-daemon --stop --quiet --signal HUP --pidfile $OPENSNITCHDPID
+
+ log_end_msg 0
+ ;;
+ restart|force-reload)
+ $0 stop
+ sleep 1
+ $0 start
+ ;;
+ status)
+ status_of_proc /usr/bin/$NAME $NAME
+ exit $?
+ ;;
+ *)
+ echo "Usage: /etc/init.d/opensnitchd {start|stop|reload|restart|force-reload|status}"
+ exit 1
+ ;;
+esac
+
+exit 0
diff --git a/debian/opensnitch.install b/debian/opensnitch.install
new file mode 100644
index 0000000..751664c
--- /dev/null
+++ b/debian/opensnitch.install
@@ -0,0 +1,3 @@
+daemon/default-config.json etc/opensnitchd/
+daemon/system-fw.json etc/opensnitchd/
+#ebpf_prog/opensnitch.o etc/opensnitchd/
diff --git a/debian/opensnitch.logrotate b/debian/opensnitch.logrotate
new file mode 100644
index 0000000..7e1d486
--- /dev/null
+++ b/debian/opensnitch.logrotate
@@ -0,0 +1,13 @@
+/var/log/opensnitchd.log {
+ rotate 7
+# order of the fields is important
+ maxsize 50M
+# we need this option in order to keep logging
+ copytruncate
+ missingok
+ notifempty
+ delaycompress
+ compress
+ create 640 root root
+ weekly
+}
diff --git a/debian/opensnitch.service b/debian/opensnitch.service
new file mode 100644
index 0000000..8d1b52f
--- /dev/null
+++ b/debian/opensnitch.service
@@ -0,0 +1,16 @@
+[Unit]
+Description=OpenSnitch is a GNU/Linux application firewall.
+Documentation=https://github.com/gustavo-iniguez-goya/opensnitch/wiki
+Wants=network.target
+After=network.target
+
+[Service]
+Type=simple
+PermissionsStartOnly=true
+ExecStartPre=/bin/mkdir -p /etc/opensnitchd/rules
+ExecStart=/usr/bin/opensnitchd -rules-path /etc/opensnitchd/rules
+Restart=always
+RestartSec=30
+
+[Install]
+WantedBy=multi-user.target
diff --git a/debian/python3-opensnitch-ui.postinst b/debian/python3-opensnitch-ui.postinst
new file mode 100755
index 0000000..0b7ab1e
--- /dev/null
+++ b/debian/python3-opensnitch-ui.postinst
@@ -0,0 +1,18 @@
+#!/bin/sh
+
+set -e
+
+autostart_by_default()
+{
+ if [ -f /etc/xdg/autostart -a ! -f /etc/xdg/autostart/opensnitch_ui.desktop ]; then
+ ln -s /usr/share/applications/opensnitch_ui.desktop /etc/xdg/autostart/
+ fi
+}
+
+autostart_by_default
+
+if command -v gtk-update-icon-cache >/dev/null && test -f /usr/share/icons/hicolor/index.theme ; then
+ gtk-update-icon-cache --quiet /usr/share/icons/hicolor/
+fi
+
+#DEBHELPER#
diff --git a/debian/python3-opensnitch-ui.postrm b/debian/python3-opensnitch-ui.postrm
new file mode 100755
index 0000000..8189482
--- /dev/null
+++ b/debian/python3-opensnitch-ui.postrm
@@ -0,0 +1,15 @@
+#!/bin/sh
+set -e
+
+case "$1" in
+ purge)
+ if [ -f /etc/xdg/autostart/opensnitch_ui.desktop ];then
+ rm -f /etc/xdg/autostart/opensnitch_ui.desktop
+ fi
+ ;;
+ remove)
+ pkill -15 opensnitch-ui || true
+ ;;
+esac
+
+#DEBHELPER#
diff --git a/debian/rules b/debian/rules
new file mode 100755
index 0000000..72f3a4d
--- /dev/null
+++ b/debian/rules
@@ -0,0 +1,42 @@
+#!/usr/bin/make -f
+export DH_VERBOSE = 1
+export DESTDIR := $(shell pwd)/debian/opensnitch
+export UIDESTDIR := $(shell pwd)/debian/python3-opensnitch-ui
+
+override_dh_installsystemd:
+ dh_installsystemd --restart-after-upgrade
+
+override_dh_auto_build:
+ $(MAKE) protocol
+# Workaround for Go build problem when building in _build
+ mkdir -p _build/src/github.com/evilsocket/opensnitch/daemon/ui/protocol/
+ cp daemon/ui/protocol/* _build/src/github.com/evilsocket/opensnitch/daemon/ui/protocol/
+ dh_auto_build
+ cd ui && python3 setup.py build --force
+
+override_dh_auto_install:
+# daemon
+ mkdir -p $(DESTDIR)/usr/bin
+ cp _build/bin/daemon $(DESTDIR)/usr/bin/opensnitchd
+# GUI
+ make -C ui/i18n
+ cp -r ui/i18n/locales/ ui/opensnitch/i18n/
+ pyrcc5 -o ui/opensnitch/resources_rc.py ui/opensnitch/res/resources.qrc
+ sed -i 's/^import ui_pb2/from . import ui_pb2/' ui/opensnitch/ui_pb2*
+ cd ui && python3 setup.py install --force --root=$(UIDESTDIR) --no-compile -O0 --install-layout=deb
+
+# daemon
+ dh_auto_install
+
+%:
+ dh $@ --builddirectory=_build --buildsystem=golang --with=golang,python3
+
+override_dh_auto_clean:
+ dh_auto_clean
+ $(MAKE) clean
+ $(RM) ui/opensnitch/resources_rc.py
+ $(RM) -r ui/opensnitch/i18n/
+ $(RM) ui/i18n/locales/*/*.qm
+ cd ui && python3 setup.py clean -a
+ $(RM) -r ui/opensnitch_ui.egg-info/
+ find ui -name \*.pyc -exec rm {} \;
diff --git a/debian/source/format b/debian/source/format
new file mode 100644
index 0000000..163aaf8
--- /dev/null
+++ b/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/debian/source/options b/debian/source/options
new file mode 100644
index 0000000..bcc4bbb
--- /dev/null
+++ b/debian/source/options
@@ -0,0 +1 @@
+extend-diff-ignore="\.egg-info$"
\ No newline at end of file
diff --git a/debian/tests/control b/debian/tests/control
new file mode 100644
index 0000000..2ae9569
--- /dev/null
+++ b/debian/tests/control
@@ -0,0 +1,2 @@
+Tests: test-resources.sh
+Depends: opensnitch
diff --git a/debian/tests/test-resources.sh b/debian/tests/test-resources.sh
new file mode 100755
index 0000000..560d7c5
--- /dev/null
+++ b/debian/tests/test-resources.sh
@@ -0,0 +1,13 @@
+#!/bin/sh
+set -e
+
+ophome="/etc/opensnitchd"
+
+ls -dl $ophome 1>/dev/null
+echo "installed OK: $ophome"
+ls -l $ophome/system-fw.json 1>/dev/null
+echo "installed OK: $ophome/system-fw.json"
+ls -l $ophome/default-config.json 1>/dev/null
+echo "installed OK: $ophome/default-config.json"
+ls -dl $ophome/rules 1>/dev/null
+echo "installed OK: $ophome/rules/"
diff --git a/debian/upstream/metadata b/debian/upstream/metadata
new file mode 100644
index 0000000..556a1cf
--- /dev/null
+++ b/debian/upstream/metadata
@@ -0,0 +1,9 @@
+---
+Name: opensnitch
+Bug-Database: https://github.com/evilsocket/opensnitch/issues
+Bug-Submit: https://github.com/evilsocket/opensnitch/issues/new
+Contact: Gustavo Iñiguez Goia
+Documentation: https://github.com/evilsocket/opensnitch/wiki
+CPE: cpe:/a:evilsocket:opensnitch
+Repository: https://github.com/evilsocket/opensnitch.git
+Repository-Browse: https://github.com/evilsocket/opensnitch
diff --git a/debian/watch b/debian/watch
new file mode 100644
index 0000000..383dd73
--- /dev/null
+++ b/debian/watch
@@ -0,0 +1,4 @@
+version=4
+opts=filenamemangle=s/.+\/v?(\d\S*)\.tar\.gz/opensnitch-\$1\.tar\.gz/,\
+uversionmangle=s/(\d)[_\.\-\+]?(RC|rc|pre|dev|beta|alpha)[.]?(\d*)$/\$1~\$2\$3/ \
+ https://github.com/evilsocket/opensnitch/tags .*/v?(\d\S*)\.tar\.gz
diff --git a/ebpf_prog/Makefile b/ebpf_prog/Makefile
new file mode 100644
index 0000000..934951c
--- /dev/null
+++ b/ebpf_prog/Makefile
@@ -0,0 +1,159 @@
+#taken from /samples/bpf/Makefile and removed all targets
+
+# SPDX-License-Identifier: GPL-2.0
+
+BPF_SAMPLES_PATH ?= $(abspath $(srctree)/$(src))
+TOOLS_PATH := $(BPF_SAMPLES_PATH)/../../tools
+
+# Libbpf dependencies
+LIBBPF = $(TOOLS_PATH)/lib/bpf/libbpf.a
+
+CGROUP_HELPERS := ../../tools/testing/selftests/bpf/cgroup_helpers.o
+TRACE_HELPERS := ../../tools/testing/selftests/bpf/trace_helpers.o
+
+always-y += opensnitch.o
+
+ifeq ($(ARCH), arm)
+# Strip all except -D__LINUX_ARM_ARCH__ option needed to handle linux
+# headers when arm instruction set identification is requested.
+ARM_ARCH_SELECTOR := $(filter -D__LINUX_ARM_ARCH__%, $(KBUILD_CFLAGS))
+BPF_EXTRA_CFLAGS := $(ARM_ARCH_SELECTOR)
+TPROGS_CFLAGS += $(ARM_ARCH_SELECTOR)
+endif
+
+TPROGS_CFLAGS += -Wall -O2
+TPROGS_CFLAGS += -Wmissing-prototypes
+TPROGS_CFLAGS += -Wstrict-prototypes
+
+TPROGS_CFLAGS += -I$(objtree)/usr/include
+TPROGS_CFLAGS += -I$(srctree)/tools/testing/selftests/bpf/
+TPROGS_CFLAGS += -I$(srctree)/tools/lib/
+TPROGS_CFLAGS += -I$(srctree)/tools/include
+TPROGS_CFLAGS += -I$(srctree)/tools/perf
+TPROGS_CFLAGS += -DHAVE_ATTR_TEST=0
+
+ifdef SYSROOT
+TPROGS_CFLAGS += --sysroot=$(SYSROOT)
+TPROGS_LDFLAGS := -L$(SYSROOT)/usr/lib
+endif
+
+TPROGCFLAGS_bpf_load.o += -Wno-unused-variable
+
+TPROGS_LDLIBS += $(LIBBPF) -lelf -lz
+TPROGLDLIBS_tracex4 += -lrt
+TPROGLDLIBS_trace_output += -lrt
+TPROGLDLIBS_map_perf_test += -lrt
+TPROGLDLIBS_test_overhead += -lrt
+TPROGLDLIBS_xdpsock += -pthread
+
+# Allows pointing LLC/CLANG to a LLVM backend with bpf support, redefine on cmdline:
+# make M=samples/bpf/ LLC=~/git/llvm/build/bin/llc CLANG=~/git/llvm/build/bin/clang
+LLC ?= llc
+CLANG ?= clang
+LLVM_OBJCOPY ?= llvm-objcopy
+BTF_PAHOLE ?= pahole
+
+# Detect that we're cross compiling and use the cross compiler
+ifdef CROSS_COMPILE
+CLANG_ARCH_ARGS = --target=$(notdir $(CROSS_COMPILE:%-=%))
+endif
+
+# Don't evaluate probes and warnings if we need to run make recursively
+ifneq ($(src),)
+HDR_PROBE := $(shell printf "\#include <linux/types.h>\n struct list_head { int a; }; int main() { return 0; }" | \
+ $(CC) $(TPROGS_CFLAGS) $(TPROGS_LDFLAGS) -x c - \
+ -o /dev/null 2>/dev/null && echo okay)
+
+ifeq ($(HDR_PROBE),)
+$(warning WARNING: Detected possible issues with include path.)
+$(warning WARNING: Please install kernel headers locally (make headers_install).)
+endif
+
+BTF_LLC_PROBE := $(shell $(LLC) -march=bpf -mattr=help 2>&1 | grep dwarfris)
+BTF_PAHOLE_PROBE := $(shell $(BTF_PAHOLE) --help 2>&1 | grep BTF)
+BTF_OBJCOPY_PROBE := $(shell $(LLVM_OBJCOPY) --help 2>&1 | grep -i 'usage.*llvm')
+BTF_LLVM_PROBE := $(shell echo "int main() { return 0; }" | \
+ $(CLANG) -target bpf -O2 -g -c -x c - -o ./llvm_btf_verify.o; \
+ readelf -S ./llvm_btf_verify.o | grep BTF; \
+ /bin/rm -f ./llvm_btf_verify.o)
+
+BPF_EXTRA_CFLAGS += -fno-stack-protector
+ifneq ($(BTF_LLVM_PROBE),)
+ BPF_EXTRA_CFLAGS += -g
+else
+ifneq ($(and $(BTF_LLC_PROBE),$(BTF_PAHOLE_PROBE),$(BTF_OBJCOPY_PROBE)),)
+ BPF_EXTRA_CFLAGS += -g
+ LLC_FLAGS += -mattr=dwarfris
+ DWARF2BTF = y
+endif
+endif
+endif
+
+# Trick to allow make to be run from this directory
+all:
+ $(MAKE) -C ../../ M=$(CURDIR) BPF_SAMPLES_PATH=$(CURDIR)
+
+clean:
+ $(MAKE) -C ../../ M=$(CURDIR) clean
+ @find $(CURDIR) -type f -name '*~' -delete
+
+$(LIBBPF): FORCE
+# Fix up variables inherited from Kbuild that tools/ build system won't like
+ $(MAKE) -C $(dir $@) RM='rm -rf' EXTRA_CFLAGS="$(TPROGS_CFLAGS)" \
+ LDFLAGS=$(TPROGS_LDFLAGS) srctree=$(BPF_SAMPLES_PATH)/../../ O=
+
+$(obj)/syscall_nrs.h: $(obj)/syscall_nrs.s FORCE
+ $(call filechk,offsets,__SYSCALL_NRS_H__)
+
+targets += syscall_nrs.s
+clean-files += syscall_nrs.h
+
+FORCE:
+
+
+# Verify LLVM compiler tools are available and bpf target is supported by llc
+.PHONY: verify_cmds verify_target_bpf $(CLANG) $(LLC)
+
+verify_cmds: $(CLANG) $(LLC)
+ @for TOOL in $^ ; do \
+ if ! (which -- "$${TOOL}" > /dev/null 2>&1); then \
+ echo "*** ERROR: Cannot find LLVM tool $${TOOL}" ;\
+ exit 1; \
+ else true; fi; \
+ done
+
+verify_target_bpf: verify_cmds
+ @if ! (${LLC} -march=bpf -mattr=help > /dev/null 2>&1); then \
+ echo "*** ERROR: LLVM (${LLC}) does not support 'bpf' target" ;\
+ echo " NOTICE: LLVM version >= 3.7.1 required" ;\
+ exit 2; \
+ else true; fi
+
+$(BPF_SAMPLES_PATH)/*.c: verify_target_bpf $(LIBBPF)
+$(src)/*.c: verify_target_bpf $(LIBBPF)
+
+$(obj)/tracex5_kern.o: $(obj)/syscall_nrs.h
+$(obj)/hbm_out_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
+$(obj)/hbm.o: $(src)/hbm.h
+$(obj)/hbm_edt_kern.o: $(src)/hbm.h $(src)/hbm_kern.h
+
+-include $(BPF_SAMPLES_PATH)/Makefile.target
+
+# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
+# But, there is no easy way to fix it, so just exclude it since it is
+# useless for BPF samples.
+$(obj)/%.o: $(src)/%.c
+ @echo " CLANG-bpf " $@
+ $(Q)$(CLANG) $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(BPF_EXTRA_CFLAGS) \
+ -I$(obj) -I$(srctree)/tools/testing/selftests/bpf/ \
+ -I$(srctree)/tools/lib/ \
+ -D__KERNEL__ -D__BPF_TRACING__ -Wno-unused-value -Wno-pointer-sign \
+ -D__TARGET_ARCH_$(SRCARCH) -Wno-compare-distinct-pointer-types \
+ -Wno-gnu-variable-sized-type-not-at-end \
+ -Wno-address-of-packed-member -Wno-tautological-compare \
+ -Wno-unknown-warning-option $(CLANG_ARCH_ARGS) \
+ -I$(srctree)/samples/bpf/ -include asm_goto_workaround.h \
+ -O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf $(LLC_FLAGS) -filetype=obj -o $@
+ifeq ($(DWARF2BTF),y)
+ $(BTF_PAHOLE) -J $@
+endif
diff --git a/ebpf_prog/README b/ebpf_prog/README
new file mode 100644
index 0000000..ae33e00
--- /dev/null
+++ b/ebpf_prog/README
@@ -0,0 +1,29 @@
+opensnitch.c is an eBPF program. Compilation requires getting kernel source.
+
+sudo apt install clang llvm libelf-dev libzip-dev flex bison libssl-dev bc rsync python3
+cd opensnitch
+wget https://github.com/torvalds/linux/archive/v5.8.tar.gz
+tar -xf v5.8.tar.gz
+patch linux-5.8/tools/lib/bpf/bpf_helpers.h < ebpf_prog/file.patch
+cp ebpf_prog/opensnitch.c ebpf_prog/Makefile linux-5.8/samples/bpf
+cd linux-5.8 && yes "" | make oldconfig && make prepare && make headers_install # (1 min)
+cd samples/bpf && make
+objdump -h opensnitch.o #you should see many sections, number 1 should be called kprobe/tcp_v4_connect
+llvm-strip -g opensnitch.o #remove debug info
+sudo cp opensnitch.o /etc/opensnitchd/
+cd ../../../daemon
+
+--opensnitchd expects to find opensnitch.o in /etc/opensnitchd/
+--start opensnitchd with:
+
+opensnitchd -rules-path /etc/opensnitchd/rules -process-monitor-method ebpf
+
+The kernel where you intend to run it must have some options activated:
+
+$ grep BPF /boot/config-$(uname -r)
+CONFIG_CGROUP_BPF=y
+CONFIG_BPF=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_BPF_EVENTS=y
+CONFIG_KPROBES=y
+CONFIG_KPROBE_EVENTS=y
diff --git a/ebpf_prog/arm-clang-asm-fix.patch b/ebpf_prog/arm-clang-asm-fix.patch
new file mode 100644
index 0000000..d8dd394
--- /dev/null
+++ b/ebpf_prog/arm-clang-asm-fix.patch
@@ -0,0 +1,14 @@
+--- ../../arch/arm/include/asm/unified.h 2021-04-20 10:47:54.075834124 +0000
++++ ../../arch/arm/include/asm/unified-clang-fix.h 2021-04-20 10:47:38.943811970 +0000
+@@ -11,7 +11,10 @@
+ #if defined(__ASSEMBLY__)
+ .syntax unified
+ #else
+-__asm__(".syntax unified");
++//__asm__(".syntax unified");
++#ifndef __clang__
++ __asm__(".syntax unified");
++#endif
+ #endif
+
+ #ifdef CONFIG_CPU_V7M
diff --git a/ebpf_prog/file.patch b/ebpf_prog/file.patch
new file mode 100644
index 0000000..a9c3668
--- /dev/null
+++ b/ebpf_prog/file.patch
@@ -0,0 +1,11 @@
+--- linux-5.8/tools/lib/bpf/bpf_helpers.h 2020-08-03 00:21:45.000000000 +0300
++++ linux-5.8/tools/lib/bpf/bpf_helpersnew.h 2021-02-23 18:45:21.789624834 +0300
+@@ -54,7 +54,7 @@
+ * Helper structure used by eBPF C program
+ * to describe BPF map attributes to libbpf loader
+ */
+-struct bpf_map_def {
++struct bpf_map_defold {
+ unsigned int type;
+ unsigned int key_size;
+ unsigned int value_size;
diff --git a/ebpf_prog/opensnitch.c b/ebpf_prog/opensnitch.c
new file mode 100644
index 0000000..916740a
--- /dev/null
+++ b/ebpf_prog/opensnitch.c
@@ -0,0 +1,508 @@
+#define KBUILD_MODNAME "dummy"
+
+//uncomment if building on x86_32
+//#define OPENSNITCH_x86_32
+
+#include <linux/ptrace.h>
+#include <linux/version.h>
+#include <uapi/linux/bpf.h>
+#include <bpf/bpf_helpers.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/skbuff.h>
+#include <linux/udp.h>
+#include <net/sock.h>
+
+#define MAPSIZE 12000
+
+//-------------------------------map definitions
+// which github.com/iovisor/gobpf/elf expects
+#define BUF_SIZE_MAP_NS 256
+
+typedef struct bpf_map_def {
+ unsigned int type;
+ unsigned int key_size;
+ unsigned int value_size;
+ unsigned int max_entries;
+ unsigned int map_flags;
+ unsigned int pinning;
+ char namespace[BUF_SIZE_MAP_NS];
+} bpf_map_def;
+
+enum bpf_pin_type {
+ PIN_NONE = 0,
+ PIN_OBJECT_NS,
+ PIN_GLOBAL_NS,
+ PIN_CUSTOM_NS,
+};
+//-----------------------------------
+
+// even though we only need 32 bits of pid, on x86_32 ebpf verifier complained when pid type was set to u32
+typedef u64 pid_size_t;
+typedef u64 uid_size_t;
+
+struct tcp_key_t {
+ u16 sport;
+ u32 daddr;
+ u16 dport;
+ u32 saddr;
+}__attribute__((packed));
+
+struct tcp_value_t{
+ pid_size_t pid;
+ uid_size_t uid;
+ u64 counter;
+}__attribute__((packed));
+
+// not using unsigned __int128 because it is not supported on x86_32
+struct ipV6 {
+ u64 part1;
+ u64 part2;
+}__attribute__((packed));
+
+struct tcpv6_key_t {
+ u16 sport;
+ struct ipV6 daddr;
+ u16 dport;
+ struct ipV6 saddr;
+}__attribute__((packed));
+
+struct tcpv6_value_t{
+ pid_size_t pid;
+ uid_size_t uid;
+ u64 counter;
+}__attribute__((packed));;
+
+struct udp_key_t {
+ u16 sport;
+ u32 daddr;
+ u16 dport;
+ u32 saddr;
+} __attribute__((packed));
+
+struct udp_value_t{
+ pid_size_t pid;
+ uid_size_t uid;
+ u64 counter;
+}__attribute__((packed));
+
+struct udpv6_key_t {
+ u16 sport;
+ struct ipV6 daddr;
+ u16 dport;
+ struct ipV6 saddr;
+}__attribute__((packed));
+
+struct udpv6_value_t{
+ pid_size_t pid;
+ uid_size_t uid;
+ u64 counter;
+}__attribute__((packed));
+
+
+// on x86_32 "struct sock" is arranged differently from x86_64 (at least on Debian kernels).
+// We hardcode offsets of IP addresses.
+struct sock_on_x86_32_t {
+ u8 data_we_dont_care_about[40];
+ struct ipV6 daddr;
+ struct ipV6 saddr;
+};
+
+
+// Add +1,+2,+3 etc. to map size helps to easier distinguish maps in bpftool's output
+struct bpf_map_def SEC("maps/tcpMap") tcpMap = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct tcp_key_t),
+ .value_size = sizeof(struct tcp_value_t),
+ .max_entries = MAPSIZE+1,
+};
+struct bpf_map_def SEC("maps/tcpv6Map") tcpv6Map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct tcpv6_key_t),
+ .value_size = sizeof(struct tcpv6_value_t),
+ .max_entries = MAPSIZE+2,
+};
+struct bpf_map_def SEC("maps/udpMap") udpMap = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct udp_key_t),
+ .value_size = sizeof(struct udp_value_t),
+ .max_entries = MAPSIZE+3,
+};
+struct bpf_map_def SEC("maps/udpv6Map") udpv6Map = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct udpv6_key_t),
+ .value_size = sizeof(struct udpv6_value_t),
+ .max_entries = MAPSIZE+4,
+};
+
+// for TCP the IP-tuple can be copied from "struct sock" only upon return from tcp_connect().
+// We stash the socket here to look it up upon return.
+struct bpf_map_def SEC("maps/tcpsock") tcpsock = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(u64),
+ .value_size = sizeof(u64),// using u64 instead of sizeof(struct sock *)
+ // to avoid pointer size related quirks on x86_32
+ .max_entries = 100,
+};
+struct bpf_map_def SEC("maps/tcpv6sock") tcpv6sock = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(u64),
+ .value_size = sizeof(u64),
+ .max_entries = 100,
+};
+
+// //counts how many connections we've processed. Starts at 0.
+struct bpf_map_def SEC("maps/tcpcounter") tcpcounter = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u64),
+ .max_entries = 1,
+};
+struct bpf_map_def SEC("maps/tcpv6counter") tcpv6counter = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u64),
+ .max_entries = 1,
+};
+struct bpf_map_def SEC("maps/udpcounter") udpcounter = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u64),
+ .max_entries = 1,
+};
+struct bpf_map_def SEC("maps/udpv6counter") udpv6counter = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u64),
+ .max_entries = 1,
+};
+struct bpf_map_def SEC("maps/debugcounter") debugcounter = {
+ .type = BPF_MAP_TYPE_ARRAY,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u64),
+ .max_entries = 1,
+};
+
+// size 150 gave ebpf verifier errors for kernel 4.14, 100 is ok
+// we can cast any struct into rawBytes_t to be able to access arbitrary bytes of the struct
+struct rawBytes_t {
+ u8 bytes[100];
+};
+
+
+//used for debug purposes only
+struct bpf_map_def SEC("maps/bytes") bytes = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(u32),
+ .value_size = sizeof(u32),
+ .max_entries = 222,
+};
+
+//used for debug purposes only
+struct bpf_map_def SEC("maps/debug") debug = {
+ .type = BPF_MAP_TYPE_HASH,
+ .key_size = sizeof(struct tcpv6_key_t),
+ .value_size = sizeof(struct rawBytes_t),
+ .max_entries = 555,
+};
+
+
+// initializing variables with __builtin_memset() is required
+// for compatibility with bpf on kernel 4.4
+
+SEC("kprobe/tcp_v4_connect")
+int kprobe__tcp_v4_connect(struct pt_regs *ctx)
+{
+ #ifdef OPENSNITCH_x86_32
+ // On x86_32 platforms I couldn't get function arguments using PT_REGS_PARM1
+ // that's why we are accessing registers directly
+ struct sock *sk = (struct sock *)((ctx)->ax);
+ #else
+ struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
+ #endif
+
+ u64 skp = (u64)sk;
+ u64 pid_tgid = bpf_get_current_pid_tgid();
+ bpf_map_update_elem(&tcpsock, &pid_tgid, &skp, BPF_ANY);
+ return 0;
+};
+
+SEC("kretprobe/tcp_v4_connect")
+int kretprobe__tcp_v4_connect(struct pt_regs *ctx)
+{
+ u64 pid_tgid = bpf_get_current_pid_tgid();
+ u64 *skp = bpf_map_lookup_elem(&tcpsock, &pid_tgid);
+ if (skp == NULL) {return 0;}
+
+ struct sock *sk;
+ __builtin_memset(&sk, 0, sizeof(sk));
+ sk = (struct sock *)*skp;
+
+ struct tcp_key_t tcp_key;
+ __builtin_memset(&tcp_key, 0, sizeof(tcp_key));
+ bpf_probe_read(&tcp_key.dport, sizeof(tcp_key.dport), &sk->__sk_common.skc_dport);
+ bpf_probe_read(&tcp_key.sport, sizeof(tcp_key.sport), &sk->__sk_common.skc_num);
+ bpf_probe_read(&tcp_key.daddr, sizeof(tcp_key.daddr), &sk->__sk_common.skc_daddr);
+ bpf_probe_read(&tcp_key.saddr, sizeof(tcp_key.saddr), &sk->__sk_common.skc_rcv_saddr);
+
+ u32 zero_key = 0;
+ u64 *val = bpf_map_lookup_elem(&tcpcounter, &zero_key);
+ if (val == NULL){return 0;}
+
+ struct tcp_value_t tcp_value;
+ __builtin_memset(&tcp_value, 0, sizeof(tcp_value));
+ tcp_value.pid = pid_tgid >> 32;
+ tcp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
+ tcp_value.counter = *val;
+ bpf_map_update_elem(&tcpMap, &tcp_key, &tcp_value, BPF_ANY);
+
+ u64 newval = *val + 1;
+ bpf_map_update_elem(&tcpcounter, &zero_key, &newval, BPF_ANY);
+ bpf_map_delete_elem(&tcpsock, &pid_tgid);
+ return 0;
+};
+
+
+SEC("kprobe/tcp_v6_connect")
+int kprobe__tcp_v6_connect(struct pt_regs *ctx)
+{
+ #ifdef OPENSNITCH_x86_32
+ struct sock *sk = (struct sock *)((ctx)->ax);
+ #else
+ struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
+ #endif
+
+ u64 skp = (u64)sk;
+ u64 pid_tgid = bpf_get_current_pid_tgid();
+ bpf_map_update_elem(&tcpv6sock, &pid_tgid, &skp, BPF_ANY);
+ return 0;
+};
+
+SEC("kretprobe/tcp_v6_connect")
+int kretprobe__tcp_v6_connect(struct pt_regs *ctx)
+{
+ u64 pid_tgid = bpf_get_current_pid_tgid();
+ u64 *skp = bpf_map_lookup_elem(&tcpv6sock, &pid_tgid);
+ if (skp == NULL) {return 0;}
+ struct sock *sk;
+ __builtin_memset(&sk, 0, sizeof(sk));
+ sk = (struct sock *)*skp;
+
+ struct tcpv6_key_t tcpv6_key;
+ __builtin_memset(&tcpv6_key, 0, sizeof(tcpv6_key));
+ bpf_probe_read(&tcpv6_key.dport, sizeof(tcpv6_key.dport), &sk->__sk_common.skc_dport);
+ bpf_probe_read(&tcpv6_key.sport, sizeof(tcpv6_key.sport), &sk->__sk_common.skc_num);
+ #ifdef OPENSNITCH_x86_32
+ struct sock_on_x86_32_t sock;
+ __builtin_memset(&sock, 0, sizeof(sock));
+ bpf_probe_read(&sock, sizeof(sock), *(&sk));
+ tcpv6_key.daddr = sock.daddr;
+ tcpv6_key.saddr = sock.saddr;
+ #else
+ bpf_probe_read(&tcpv6_key.daddr, sizeof(tcpv6_key.daddr), &sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
+ bpf_probe_read(&tcpv6_key.saddr, sizeof(tcpv6_key.saddr), &sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
+ #endif
+
+ u32 zero_key = 0;
+ u64 *val = bpf_map_lookup_elem(&tcpv6counter, &zero_key);
+ if (val == NULL){return 0;}
+
+ struct tcpv6_value_t tcpv6_value;
+ __builtin_memset(&tcpv6_value, 0, sizeof(tcpv6_value));
+ tcpv6_value.pid = pid_tgid >> 32;
+ tcpv6_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
+ tcpv6_value.counter = *val;
+ bpf_map_update_elem(&tcpv6Map, &tcpv6_key, &tcpv6_value, BPF_ANY);
+
+ u64 newval = *val + 1;
+ bpf_map_update_elem(&tcpv6counter, &zero_key, &newval, BPF_ANY);
+ bpf_map_delete_elem(&tcpv6sock, &pid_tgid);
+ return 0;
+};
+
+
+SEC("kprobe/udp_sendmsg")
+int kprobe__udp_sendmsg(struct pt_regs *ctx)
+{
+ #ifdef OPENSNITCH_x86_32
+ struct sock *sk = (struct sock *)((ctx)->ax);
+ struct msghdr *msg = (struct msghdr *)((ctx)->dx);
+ #else
+ struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
+ struct msghdr *msg = (struct msghdr *)PT_REGS_PARM2(ctx);
+ #endif
+
+ u64 msg_name; //pointer
+ __builtin_memset(&msg_name, 0, sizeof(msg_name));
+ bpf_probe_read(&msg_name, sizeof(msg_name), &msg->msg_name);
+ struct sockaddr_in * usin = (struct sockaddr_in *)msg_name;
+
+ struct udp_key_t udp_key;
+ __builtin_memset(&udp_key, 0, sizeof(udp_key));
+ bpf_probe_read(&udp_key.dport, sizeof(udp_key.dport), &usin->sin_port);
+ if (udp_key.dport != 0){ //likely
+ bpf_probe_read(&udp_key.daddr, sizeof(udp_key.daddr), &usin->sin_addr.s_addr);
+ }
+ else {
+ //very rarely dport can be found in skc_dport
+ bpf_probe_read(&udp_key.dport, sizeof(udp_key.dport), &sk->__sk_common.skc_dport);
+ bpf_probe_read(&udp_key.daddr, sizeof(udp_key.daddr), &sk->__sk_common.skc_daddr);
+ }
+ bpf_probe_read(&udp_key.sport, sizeof(udp_key.sport), &sk->__sk_common.skc_num);
+ bpf_probe_read(&udp_key.saddr, sizeof(udp_key.saddr), &sk->__sk_common.skc_rcv_saddr);
+
+ u32 zero_key = 0;
+ __builtin_memset(&zero_key, 0, sizeof(zero_key));
+ u64 *counterVal = bpf_map_lookup_elem(&udpcounter, &zero_key);
+ if (counterVal == NULL){return 0;}
+ struct udp_value_t *lookedupValue = bpf_map_lookup_elem(&udpMap, &udp_key);
+ u64 pid = bpf_get_current_pid_tgid() >> 32;
+ if ( lookedupValue == NULL || lookedupValue->pid != pid) {
+ struct udp_value_t udp_value;
+ __builtin_memset(&udp_value, 0, sizeof(udp_value));
+ udp_value.pid = pid;
+ udp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
+ udp_value.counter = *counterVal;
+ bpf_map_update_elem(&udpMap, &udp_key, &udp_value, BPF_ANY);
+
+ u64 newval = *counterVal + 1;
+ bpf_map_update_elem(&udpcounter, &zero_key, &newval, BPF_ANY);
+ }
+ //else nothing to do
+ return 0;
+
+};
+
+
+SEC("kprobe/udpv6_sendmsg")
+int kprobe__udpv6_sendmsg(struct pt_regs *ctx)
+{
+ #ifdef OPENSNITCH_x86_32
+ struct sock *sk = (struct sock *)((ctx)->ax);
+ struct msghdr *msg = (struct msghdr *)((ctx)->dx);
+ #else
+ struct sock *sk = (struct sock *)PT_REGS_PARM1(ctx);
+ struct msghdr *msg = (struct msghdr *)PT_REGS_PARM2(ctx);
+ #endif
+
+ u64 msg_name; //a pointer
+ __builtin_memset(&msg_name, 0, sizeof(msg_name));
+ bpf_probe_read(&msg_name, sizeof(msg_name), &msg->msg_name);
+
+ struct udpv6_key_t udpv6_key;
+ __builtin_memset(&udpv6_key, 0, sizeof(udpv6_key));
+ bpf_probe_read(&udpv6_key.dport, sizeof(udpv6_key.dport), &sk->__sk_common.skc_dport);
+ if (udpv6_key.dport != 0){ //likely
+ bpf_probe_read(&udpv6_key.daddr, sizeof(udpv6_key.daddr), &sk->__sk_common.skc_v6_daddr.in6_u.u6_addr32);
+ }
+ else {
+ struct sockaddr_in6 * sin6 = (struct sockaddr_in6 *)msg_name;
+ bpf_probe_read(&udpv6_key.dport, sizeof(udpv6_key.dport), &sin6->sin6_port);
+ bpf_probe_read(&udpv6_key.daddr, sizeof(udpv6_key.daddr), &sin6->sin6_addr.in6_u.u6_addr32);
+ }
+
+ bpf_probe_read(&udpv6_key.sport, sizeof(udpv6_key.sport), &sk->__sk_common.skc_num);
+ bpf_probe_read(&udpv6_key.saddr, sizeof(udpv6_key.saddr), &sk->__sk_common.skc_v6_rcv_saddr.in6_u.u6_addr32);
+
+
+ #ifdef OPENSNITCH_x86_32
+ struct sock_on_x86_32_t sock;
+ __builtin_memset(&sock, 0, sizeof(sock));
+ bpf_probe_read(&sock, sizeof(sock), *(&sk));
+ udpv6_key.daddr = sock.daddr;
+ udpv6_key.saddr = sock.saddr;
+ #endif
+
+ u32 zero_key = 0;
+ u64 *counterVal = bpf_map_lookup_elem(&udpv6counter, &zero_key);
+ if (counterVal == NULL){return 0;}
+ struct udpv6_value_t *lookedupValue = bpf_map_lookup_elem(&udpv6Map, &udpv6_key);
+ u64 pid = bpf_get_current_pid_tgid() >> 32;
+ if ( lookedupValue == NULL || lookedupValue->pid != pid) {
+ struct udpv6_value_t udpv6_value;
+ __builtin_memset(&udpv6_value, 0, sizeof(udpv6_value));
+ udpv6_value.pid = pid;
+ udpv6_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
+ udpv6_value.counter = *counterVal;
+ bpf_map_update_elem(&udpv6Map, &udpv6_key, &udpv6_value, BPF_ANY);
+ u64 newval = *counterVal + 1;
+ bpf_map_update_elem(&udpv6counter, &zero_key, &newval, BPF_ANY);
+ }
+ //else nothing to do
+ return 0;
+
+};
+
+SEC("kprobe/iptunnel_xmit")
+int kprobe__iptunnel_xmit(struct pt_regs *ctx)
+{
+ #ifdef OPENSNITCH_x86_32
+ // TODO
+ return 0;
+ #else
+ struct sk_buff *skb = (struct sk_buff *)PT_REGS_PARM3(ctx);
+ u32 src = (u32)PT_REGS_PARM4(ctx);
+ u32 dst = (u32)PT_REGS_PARM5(ctx);
+ #endif
+
+ u16 sport = 0;
+ unsigned char *head;
+ u16 pkt_hdr;
+ __builtin_memset(&head, 0, sizeof(head));
+ __builtin_memset(&pkt_hdr, 0, sizeof(pkt_hdr));
+ bpf_probe_read(&head, sizeof(head), &skb->head);
+ bpf_probe_read(&pkt_hdr, sizeof(pkt_hdr), &skb->transport_header);
+ struct udphdr *udph;
+ __builtin_memset(&udph, 0, sizeof(udph));
+
+ udph = (struct udphdr *)(head + pkt_hdr);
+ bpf_probe_read(&sport, sizeof(sport), &udph->source);
+ sport = (sport >> 8) | ((sport << 8) & 0xff00);
+
+ struct udp_key_t udp_key;
+ struct udp_value_t udp_value;
+ u32 zero_key = 0;
+ __builtin_memset(&udp_key, 0, sizeof(udp_key));
+ __builtin_memset(&udp_value, 0, sizeof(udp_value));
+
+ bpf_probe_read(&udp_key.sport, sizeof(udp_key.sport), &sport);
+ bpf_probe_read(&udp_key.dport, sizeof(udp_key.dport), &udph->dest);
+ bpf_probe_read(&udp_key.saddr, sizeof(udp_key.saddr), &src);
+ bpf_probe_read(&udp_key.daddr, sizeof(udp_key.daddr), &dst);
+
+ u64 *counterVal = bpf_map_lookup_elem(&udpcounter, &zero_key);
+ if (counterVal == NULL){return 0;}
+
+ struct udp_value_t *lookedupValue = bpf_map_lookup_elem(&udpMap, &udp_key);
+ u64 pid = bpf_get_current_pid_tgid() >> 32;
+ if ( lookedupValue == NULL || lookedupValue->pid != pid) {
+ udp_value.pid = pid;
+ udp_value.uid = bpf_get_current_uid_gid() & 0xffffffff;
+ udp_value.counter = *counterVal;
+ bpf_map_update_elem(&udpMap, &udp_key, &udp_value, BPF_ANY);
+ u64 newval = *counterVal + 1;
+ bpf_map_update_elem(&udpcounter, &zero_key, &newval, BPF_ANY);
+ }
+
+ return 0;
+
+};
+
+// debug only: increment key's value by 1 in map "bytes"
+void increment(u32 key){
+ u32 *lookedupValue = bpf_map_lookup_elem(&bytes, &key);
+ if (lookedupValue == NULL){
+ u32 zero = 0;
+ bpf_map_update_elem(&bytes, &key, &zero, BPF_ANY);
+ }
+ else {
+ u32 newval = *lookedupValue + 1;
+ bpf_map_update_elem(&bytes, &key, &newval, BPF_ANY);
+ }
+}
+
+char _license[] SEC("license") = "GPL";
+// this number will be interpreted by the elf loader
+// to set the current running kernel version
+u32 _version SEC("version") = 0xFFFFFFFE;
diff --git a/proto/.gitignore b/proto/.gitignore
new file mode 100644
index 0000000..0d20b64
--- /dev/null
+++ b/proto/.gitignore
@@ -0,0 +1 @@
+*.pyc
diff --git a/proto/Makefile b/proto/Makefile
new file mode 100644
index 0000000..bbbe2c6
--- /dev/null
+++ b/proto/Makefile
@@ -0,0 +1,14 @@
+# Generate the gRPC bindings from the shared ui.proto definition:
+# Go stubs for the daemon and Python stubs for the GUI.
+
+# Neither target produces a file named "all" or "clean".
+.PHONY: all clean
+
+all: ../daemon/ui/protocol/ui.pb.go ../ui/opensnitch/ui_pb2.py
+
+../daemon/ui/protocol/ui.pb.go: ui.proto
+	protoc -I. ui.proto --go_out=../daemon/ui/protocol/ --go-grpc_out=../daemon/ui/protocol/ --go_opt=paths=source_relative --go-grpc_opt=paths=source_relative
+
+../ui/opensnitch/ui_pb2.py: ui.proto
+	python3 -m grpc_tools.protoc -I. --python_out=../ui/opensnitch/ --grpc_python_out=../ui/opensnitch/ ui.proto
+
+clean:
+	@rm -f ../daemon/ui/protocol/ui.pb.go
+	@rm -f ../daemon/ui/protocol/ui_grpc.pb.go
+	@rm -f ../ui/opensnitch/ui_pb2.py
+	@rm -f ../ui/opensnitch/ui_pb2_grpc.py
+
diff --git a/proto/ui.proto b/proto/ui.proto
new file mode 100644
index 0000000..d28ff6d
--- /dev/null
+++ b/proto/ui.proto
@@ -0,0 +1,129 @@
+syntax = "proto3";
+
+package protocol;
+
+option go_package = "github.com/evilsocket/opensnitch/daemon/ui/protocol";
+
+// UI is the RPC service the daemon talks to; the GUI implements the
+// server side, the daemon is the client.
+service UI {
+    // Ping delivers daemon statistics and checks the GUI is reachable.
+    rpc Ping(PingRequest) returns (PingReply) {}
+    // AskRule asks the GUI what to do with a new connection.
+    rpc AskRule (Connection) returns (Rule) {}
+    // Subscribe registers the daemon's configuration with the GUI.
+    rpc Subscribe (ClientConfig) returns (ClientConfig) {}
+    // Notifications is a bidirectional stream for pushing config/rule
+    // changes to daemons and receiving their acknowledgements.
+    rpc Notifications (stream NotificationReply) returns (stream Notification) {}
+}
+
+// Event is one intercepted connection together with the rule that
+// was applied to it.
+message Event {
+    // event time as a string
+    string time = 1;
+    Connection connection = 2;
+    Rule rule = 3;
+    // event time in Unix nanoseconds (per the field name)
+    int64 unixnano = 4;
+}
+
+// Statistics is the daemon's counters, sent to the GUI in every
+// PingRequest.
+message Statistics {
+    string daemon_version = 1;
+    uint64 rules = 2;
+    uint64 uptime = 3;
+    uint64 dns_responses = 4;
+    uint64 connections = 5;
+    uint64 ignored = 6;
+    uint64 accepted = 7;
+    uint64 dropped = 8;
+    uint64 rule_hits = 9;
+    uint64 rule_misses = 10;
+    // per-value aggregated counters; the map type parameters below were
+    // lost in transit ("map" alone is invalid proto3) and are restored.
+    map<string, uint64> by_proto = 11;
+    map<string, uint64> by_address = 12;
+    map<string, uint64> by_host = 13;
+    map<string, uint64> by_port = 14;
+    map<string, uint64> by_uid = 15;
+    map<string, uint64> by_executable = 16;
+    repeated Event events = 17;
+}
+
+// PingRequest carries the daemon's statistics; id is presumably echoed
+// back in the PingReply — confirm against the server implementation.
+message PingRequest {
+    uint64 id = 1;
+    Statistics stats = 2;
+}
+
+// PingReply acknowledges a PingRequest.
+message PingReply {
+    uint64 id = 1;
+}
+
+// Connection describes an intercepted connection and the process that
+// originated it.
+message Connection {
+    string protocol = 1;
+    string src_ip = 2;
+    uint32 src_port = 3;
+    string dst_ip = 4;
+    string dst_host = 5;
+    uint32 dst_port = 6;
+    uint32 user_id = 7;
+    uint32 process_id = 8;
+    string process_path = 9;
+    string process_cwd = 10;
+    repeated string process_args = 11;
+    // environment of the process; the map type parameters were lost in
+    // transit ("map" alone is invalid proto3) and are restored here.
+    map<string, string> process_env = 12;
+}
+
+// Operator is the matching condition of a Rule — what field of a
+// Connection is compared, and against what.
+message Operator {
+    // kind of match — TODO confirm allowed values against the rules engine
+    string type = 1;
+    // the Connection field this operator inspects
+    string operand = 2;
+    // the value to match against
+    string data = 3;
+    bool sensitive = 4;
+}
+
+// Rule decides what to do with a Connection that matches its Operator.
+message Rule {
+    string name = 1;
+    bool enabled = 2;
+    // NOTE(review): appears to control evaluation priority — confirm
+    bool precedence = 3;
+    string action = 4;
+    // how long the rule stays active (e.g. once/always) — TODO confirm
+    string duration = 5;
+    Operator operator = 6;
+}
+
+// Action identifies the kind of a Notification pushed by the GUI to
+// the daemon (see Notification.type).
+enum Action {
+    NONE = 0;
+    LOAD_FIREWALL = 1;
+    UNLOAD_FIREWALL = 2;
+    CHANGE_CONFIG = 3;
+    ENABLE_RULE = 4;
+    DISABLE_RULE = 5;
+    DELETE_RULE = 6;
+    CHANGE_RULE = 7;
+    LOG_LEVEL = 8;
+    STOP = 9;
+    MONITOR_PROCESS = 10;
+    STOP_MONITOR_PROCESS = 11;
+}
+
+// client configuration sent on Subscribe()
+// ClientConfig is sent by the daemon on Subscribe() to register itself
+// with the GUI.
+message ClientConfig {
+    uint64 id = 1;
+    // presumably the daemon/host name — confirm against the caller
+    string name = 2;
+    string version = 3;
+    bool isFirewallRunning = 4;
+    // daemon configuration as json string
+    string config = 5;
+    uint32 logLevel = 6;
+    // the daemon's currently loaded rules
+    repeated Rule rules = 7;
+}
+
+// notification sent to the clients (daemons)
+message Notification {
+    uint64 id = 1;
+    // presumably the target daemon's name — confirm against server code
+    string clientName = 2;
+    // presumably the sending GUI server's name — confirm
+    string serverName = 3;
+    // CHANGE_CONFIG: 2, data: {"default_timeout": 1, ...}
+    Action type = 4;
+    // payload whose meaning depends on type (see the example above)
+    string data = 5;
+    repeated Rule rules = 6;
+}
+
+// notification reply sent to the server (GUI)
+message NotificationReply {
+    // id of the Notification being acknowledged — presumably; confirm
+    uint64 id = 1;
+    NotificationReplyCode code = 2;
+    string data = 3;
+}
+
+// Result of processing a Notification on the daemon side.
+enum NotificationReplyCode {
+    OK = 0;
+    ERROR = 1;
+}
diff --git a/release.sh b/release.sh
new file mode 100755
index 0000000..d282411
--- /dev/null
+++ b/release.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# nothing to see here, just a utility i use to create new releases ^_^
+
+# Current version is the quoted string on the "Version" line of version.go.
+CURRENT_VERSION=$(grep Version daemon/core/version.go | cut -d '"' -f 2)
+TO_UPDATE=(
+    daemon/core/version.go
+    ui/version.py
+)
+
+echo -n "Current version is $CURRENT_VERSION, select new version: "
+read NEW_VERSION
+# -e so the trailing \n is printed as a newline instead of literally
+echo -e "Creating version $NEW_VERSION ...\n"
+
+for file in "${TO_UPDATE[@]}"
+do
+    echo "Patching $file ..."
+    # replace every occurrence of the old version string in place
+    sed -i "s/$CURRENT_VERSION/$NEW_VERSION/g" "$file"
+    git add "$file"
+done
+
+git commit -m "Releasing v$NEW_VERSION"
+git push
+
+git tag -a "v$NEW_VERSION" -m "Release v$NEW_VERSION"
+git push origin "v$NEW_VERSION"
+
+echo
+echo "All done, v$NEW_VERSION released ^_^"
diff --git a/screenshots/opensnitch-ui-general-tab-deny.png b/screenshots/opensnitch-ui-general-tab-deny.png
new file mode 100644
index 0000000000000000000000000000000000000000..809fc8efa2768a65825a57f981fd91323b1ee238
GIT binary patch
literal 109267
zcmcG$bySvH_cr=~0xBv7B@&{5fOI#C0@B^BbV_$9A_9Wa4N9k!bhmVON_Tgj`RsT5
z8}Ijh=ZrJXA7_uj-W#}|`(F22bIp0p>$(;`(vrfrZV=o+AP~1iUcZt>Ag*{I5U9e}
z(BUVwKRGtxKUb{c&Ikg&?u0K@+{?oG|?;
z#i--9{Pui7*TQynX4-Hode-7`t0@6#>ATX>ff_}ToYnk>k~gD2Q!b3huSQ{Vv=ziM
z*IUOgavHGAF&p<&l3y~Be!L@a4gPh>x4Y2MH+QaPa57&wq$#&Dlp{6UR)S4AR@cY&U`YnQw<>k)DVbaJO!KJSvMI|Nm=UYSD
zW4VxO)qG|JPbPi?q}oGRmf{bDg6FL@y#@PXhJCnskp^=qe)-C)GO
zue{wYi|sX5Y!MtB>{(dIT4+2_S6ll+!-9nXhFMZlGCec%p}3eGk52v3)2H5hdp3kb
zM4NDn{@rKgEg64jw<*7%prxhdMQWc+vAM?D+8P6azhKqc`g*!t>TN|u#ie~GxPrWz
zXM+nNkL?u`LkP=J>bOh6ZZS#5${Gqkunow#Bk+0W`?pwpF;iK#9iy`}D!;eS7QOGx
zObN?i?>X_pyQ~akCiWOI;NP@&F)CEeP|69=%uymqdVD-zwo3IKE`R~X#Kxv8Wa1(i
z85xsOQf8KxJ{K15Pf$>OyfZHr|9WXNdf0V=sYl5|$7cJ7uRHH3&q$~c4#~8EswC>-
z!9{Bke|@Rb6Cavb=D10e{>|=aK8a@&+rF_uE*@x?#UG?4G~%2u6;w+5E0K^5lCra!_=Z~)Xps55wzciUd^!3p~x#Ed(d-}U~@7{dKmeqg!f=UR%
z%*T6GyyYI0~z`l5D%{|-N%ms)t|9oKT*slJT7?S|s~
zXFvJe97c_wi|AigT(4mK*dbWg`_^8vv$Gf6&Aozg`_99MCPizo#)KU2YD^=eqaQ-N9>&BkhXw}LF2=h{
z3JD4QSZ$-O=CWPMHSBq8w>JE~R$KdUW5QV4f&QOYF38W{Iy@B48H%Po+}U}|!$W9b
zuvji3dyTYJ1l|3-B4mngqA9V(-C_N>c{?B7pV1#vty-^kVR!A=jkYkQ>h;^^(=i>z
zpX&x~d1}*r{_In3H!iC&h;dvgMutPzjALERPOA19T?bxmN~vIW8+HfP<=I9XDx5wr
z9^?P+(k2o~m{}frIUJ@$07>Vzm6cWh>C7W`LoT~D{7l8{H+3JcJ2*SvCn4!-k7Q0)
zDWK?xVwEHDePd$cr76v!)zzTQz+lixDDd)SB(F1DPW1&B>>&NI5Cu20(OSi&lT?&@Rq)rG&oVzZferYx(`VB=t;mA6vT5UV2W80&fPkV$%
z_>(CkajH*^5AX$wt@~8$x5-YWl6%txuTJ@^3CM;oUaKmNuKY3d+w%H%qr>++19cA{
zJ{Ku(*ob%6(K2Ky-jkI7ITTiP&MjB3a(akezgaT6@?iAg*SuAE?3U3R{p$>$Yc))?
z!@~qJ?$&rre)q85z$g_m@Xk)Jv436P@-A%NqHFg1Pud*!t*hIY&f%ed(5HF$C7nX!
zscrPnE^j^RzVSO?u^~)r*p4ZvMBaq@2(33`N}B8BVuCHQhw$hEQ=m(oR6)t7EEa>Y
z#M<_Tzi0Lb!(9ia)5CRxj-qDAOexl^_h-dVut)eLsobT=JCp{~&3um5uDF%XNQSdt
ze!UpTel&a4Z%V5vozVTHf;lPx=NIL&cLcxDgC7bfyY91GaxX}_wk1}F?RFk-rz@5y
zr3*)=p7iFG-6uG=#0^mU-Po+D{e4!3jpPg2Kq^L}aA|Zn>n!RjUv$T&r8&>VRI$U)
z_xDpW9&XrV^{3~djqyF=3|UvRv*O{2T5VBLEl;{SA-E-!nAd%IP_IYJ1
z{vhkmXSP(_KRN$s6)-8X*AEx!(yeytz2^&?x;RA3{7N=0Urb9xQ1)*3cUQZX-DkCx
zea0t42S0V|9lb;cT;9F@I)8LRZhE7usg?b+YA3-LC3k~^%MRUIsWDDFMxPkeLPyFr6{SB$U*1q2Zv6&-878&p2>hQ++1Va?md_MWRkQP$>Br>vL0
zXefqOR}7!klPQ`B5r-l?|53XX+jHvDuNcM890?j@
zF$7b!j;J1eymRQMm?@ap^DQ>kjBTHhlQWX>ZPOim{NFL0)@h2_1Qu1t6zS>d`eS+q
z22eS>jm2;eu*2dta~8)es^@p++g$c~gpW>7cWW_7?%cm$U+0B6fRP8OCS|C|Y;3sH
z`Y92&o$geXtLbD#X$_lwG)c&ttKFTQ0sZ~*ow3})$)cf%61Q`%k&zLmbGd8VISIN+
zfhfwH9I{(sl(dpR>6Tc&MMmy-$!^}!bX}#&SjN7G)jsRFc{$U)asQ+4r+nU452Ya`
z{BwC$8&LwB;HTT?rTm=1JNWCdR)<3lzYH4Y-uLH!S{>up8!e-(6Uh0|Z{G0z&p89l
zRJ+e=IA`7}(^I>gX3pD1{{4xDQ_r8x
zr@xk-*yYjkT#1gl;$SWQlzKk4a>xElQhIk_U~qg&N@1W$wOifZKI6xDzh>nE(H}~;
zSr1<9ZA^V(#thh6je5yKG*V~kx-c^3YaZXzGR4Q_OhNJn
z_Ue7XB{qNVoiwk!Bj@>GV}?`4lIB7BhIOotDL0!pVmoncCcTB!Y1g2AoUnt$xo(MX
zZWDE6NWbReWp6FZS|u6JbyETM^v$
zbiN7Q5JIv;SfY0kD>hsJ%Uaso=UPLbJ>N+CUQl4|6VLMmhI7R~o41HkH!0}#(3g82!J)#e)GtcbxQR`XZ$*-D@NzdjjR3XIMTrXQfjajDA;a{9o^ulZDD
zwk_DRY+7mQrMB3+EEfIfMIACmQ}(e#;-_b=5<#+kzZpKTaYwJ6-Bk>{8;)K+`n4xZ
z0N-2kydgDp=+E5Hx7)l0e9rKn)P*6~Nm3-x|V^-7QZ_qvD=UdU=cz6>%D?8h0IQPd$NMC;ICahS+!Keu|
z5fKmB&AZcNo@hF+vr5MC7_lk*VmY3$ZAj@c%q=Qv8XZ*?Is@cktjMRZ-Dh;I+~$j1
zv1$)n7v)3I8wOGub>H)nji2lNilLA2P>kj#&`)k09Yyb+-af+ovLt^TFT<2gv=KLo
znn+(Wg(ZbjkRGJUO^wbg^!0o5wdJ#CTAjl+$$mQOokmh#bsq}rdMU%u&!?m?neqLm
zgx42MSjZoq_mpi9dpzZ|abs;BG^?v!jYo8kF5F(1(V<8TSJ!@sfO)i(_BP~j_w)B)MVTk{HQTOrEc2}YQ%ObP{@ozlSpo%XJsv{DN<2UF@CN*cdlqpXz%0`DF-dPu~
zcGI33nLd?=z{#<6!c79gvy4Al*#zdQUP)p~ztiWuoYdbKuxm1(zu
zR@G#69LtCsV@+(|$jmD=6#X~p(`NZAx9CEt-()jdt+yM^h+SMJykfx_W6CvQ&AN9x
zpS|CvD;BpnL&35Sdx*~~l(o1b^dW1VaP;e=&huAG=htuiYA%=5pr&MuIoan3$D#k=
zS=sZwc47=u%|9?g?rEtGLb}A3E+kXv)`mjnj8&@LubqVRY2vG+1f^B>oY~M+ojj!*Z4G9*$3BN;jQT|7K{DQG+5QK>)P`FhnKvjc-q%;8M-cAQE_s@!EwwJI8mNRPU&gwUpP
z?9)$tf@a+P&a)vOU1=gLJx>%!PsBNv72WohWU++#Ez0`2eoOt~j^uw9$|iA^E8#bP
zuLFOun$OU5SNfrfS&zQ!?@hsK+1tkHGYF-
zufu=Mbj9|ETV1>A2WwhWpLE&qx1_GpZ#QS`
zv$!}vD%`z!C2UDqM%guLMlGgGu
zwfd*f27dPQtFNz5l%G<#NLzdwKPzy}FJ;@X&f2275mO`j&Qe@`;pf-p_|K%vZTXwT
z-8|!71n8555EMT*E~R+4(T=khO0YebCbO58aK#^wNJw%n`P(P?_8V`z79`dB#u3F^3ldb@|HaXm1h?=6BE1FqL))I5wUtS4+apC+=cW4Pjdt%^v~R{KY7ySH4sFUS+|ln-JL|=Wt-)d$`~dm-1$wmkbC08
zjwUf>3eg1>3a4V6{O<4R-tEe>szvPU0jx%soN$oq&&z&%wzlC|0B4eikhUe+MuX
zAu@?Vp&8!=uIVuDHLGxJs7ape>FkV6Y?pdt=upnNvkBLd3({AXvxsV?^jhp&$b=PA
z9fnaQqGCsimns2gWd(ckN8d7RUNQhKU%uP~Vu&mDwzISI`1ttN))uf-i$8^%Xh^Fa
zcNbc)rAa@Qs2z@C(UrLWp~4N966vE%4_ZGr6h!E$L|a)zGEqNg#3IIlo(4&*sAl6D
z7#Q&I@Z{#_%Wl4BcI9_;a&q#izE^wizZkO|9TozBGBm+=*~QwWHc~B`|RXP@n(HA?Dr@lRF1)5fhbuy!f@f9eykXk5Ny$x{_NDw
zoLNm@daoish0As2D{bbRzr#FIi$wF-lDZRN_Pb%E$G?m3GO2rglJF20ar?F;+MwP;
zrU4rk0tppdsefmkMl~`35rxWb@=Oi;O9iWrY~WZt*N(w^J*StF8&!@%_y3M1#Gi=>
zg=Lw9C$LuR**CM_G`Z%Z(+Z|yXP;fG$;85x%rh8rL|E%zu?KU~D+SA&&e?xZM;BcOl!I5`N
zmq2U7op=+0_Y*_CMq&}OCb`|SL1=<(L-vM6p_Xg4-@gHI2Y0hd5{1`g4>9J>Q@j{R`{ZN}vU-oDu&mV{kZ(u(TQ~2YBvYC6?O&p0Vp8t&Ou*WqPRkgO@u^dHZ
zeRO0_BIOOXK{jQT<=;I9!E&tLF+WFizqV6wKlScQm3m$C^zX-HBK;8pwK6qwsZxP4
zF%8*j*T*X?uf%o|_zQXZ7t*Rks;F1sizw>jDjNFu`GxoVOU-{Q`MR-sZ8~)6+1!#f
z@YQ4#c+{`>voinaL(0ES!?_d-5HcSv4m+n*Jw*Sjd
zTgebM_V&$&ED1nw6Zq=|l0hy??D-7pL}b$6Hx2tK9*4TJvLb3|m_~3ltg+3I63bxGOz}1ZWf*4Gb@x;WVdv|tZdGG%Hw#Opl6BCvoUI_UU=mq^MY;MN
zQF@^v|1<9I;?>tQ|Gia1z1V*xrvHZ|@n89;b&cvf@XtWH2Yve{tti5m4^;yBSf~A!
zc@6A;0|brgKN%xiQ4B~Y$QXX_)`l4$KY4Qf=1rQ-guk!xD)Ooa5Eu8(CX!PmqVr9L
z=n@hWOI=UdjEs!tHIhzSTUxGz#Np!VN}JM25vG2KAu(F!qO4l3rs^uOGpGg>dbJYV
zgK53M=H9(~@2ss8VHk!i1hCB=@s_KkvC#x&KKuBPLL2+=VYG17|IJOfDJanS`Kx#6
zf&gd8QWrQQ1ecGFkAM8LMDrRUr=pnyGmDGD4h|)wDo*{Yrf*to-Mf>r+q{zt%aaF&
z8ssqsJx9won=25tKPF+!T`DuDTvLI1DiKxRV5*P;6Eo>v
z^TVYLO5K_e_Dx`L*wU`3sNitFaPc{aDI%oogM1EmLG4MzUT~Qsmx1LL(GLGsXxh*;yb>orltmb#HYl>w~n~;T8&*@
zUx8XD5R`c>A)z->W_y!><#W9I#RC$O_#L{*>OsITMuS-o+|RZ#Koq0ioJkXJ9)k@B
zB$<+yYvYfX-Vy8;aq48HSP;7-K$Ib%06M32dI=Alb2XC;9{z$&B@@@0edutorowl=
zCr77VBAqZz&&3rrQeaTvdTIwsNAproqT^a#Gh(>XncaG^bH3$iye?=04b9EF?aW;a
zR&%de%@TWlN3ohH!|Z`pxVJeKfA@*d^x9gx|3h|o<2Uf!ER}*!z{2D^Y`-xzGgE;p
z;fGHc83T!Vof!1n@hd7TH%-RIeG(I&K{o?Ru-Hr!d**l+?mW2eHk=
zF@GSq_2uHk2f1DVgxQ5NC=gZccd28=WNys?J~Pq*RShr
zjF;N1^j~*(cSnjJz-u_}cCaI}0{_L)lPAIfx)5F~_M}w4RztS*CIJBf@>$AI69^G*
z7sm!wM>CiQ8xxu4yFJ1rfsv6dzl{2BNYop$MEv(W{v?w
z)7efNJ+zM9Wws_KhnrHMC?kUj0k4LpeR*--pC&W=Bhwj}cUZv3)YQI;*{X~X<)Dgg
z@9*pFuMG5OtA&5%JNXQ(jN93^aFO|hsH!RfNPGrM-9J_sy~bxxChVv3U@K=RX15k}
z@wMHT;z6m&d)usBWcuK#R9uoqx!o6hMpS-&egp)qN|g&Ix7}KeHbZxc#KTXwiF|W%
zEb`X(bK?!5bHaJ1$N*Cg*sgP~CHOiPR;u|#nf_QTpKDa3AD;Ex@4KqM3~T4x!ngPK
z}f5f||
zbnzi~{*Q4XM2>o?!1Lz_)ph=2(gWB!T5iXJ=cl_}{EPcTS_T6d3N}mK@0Bzi(L8Wz
zRo^YNKY+Iq00y&veaiJvLtR}RQ35hGvq7isOoI;@1qGAk%qzH$RQ;!0C|ldxKVVZ-
zoNkmGKQ(L%qqSKbl#s|#D>m16KRrF|%hwZ|nVoHO#qK6!W)6#
zjY=JCiQ`T8OTW-ioGJH9UJ{or6lVQ)B;LL}TjWRXxJYJ$pqLosJ|{Lrgv7;l!t&mr
zsRq71lv+Xd@?t(*>uAQ0-eEPn7@x<10iyAT`{e~t^$(<^q!e>BJI;=GBe`s;>FDUx
zt6U!7;o(6=Rw*>b>EhYFDHcXkJ2ge3rKKe+E8AMRalE|m1pn9GS*uc@&uG~FNYiPV
zoLVvKa{^_><;5wo&x5@4Ei|+dng3vDM^l#_l6+sXNGLk|dKNK@!Ah!n|Kuvy$j?rh(LI}aZC6cp?djjihNcR}V6G&H27
zRsHoHYz!c3?KG3PN1%DQpYP*A<`T9i;)*%`=K8w2B}fFDRi||D)P?RJFZY&uJRx&NqtQb?(*~j(sg|AW
zui~A=x#6#>ML0XsN6wjjg^Y=Nd<=N4E
zZ6Boat%KUz35BZ_MBr;!SQy|f$;4)*LSHA*B7;=C6y8>|O~8^}4CQrIWS
zPL0SWdT(XG53s@(1e(>!o)NM08YAfMAB7Sbqeaycg>E5GuUr{8^@4SWAaYDjPR?`M
zH#Q!~(6i$!hDn0uSFhAsfz6qvocE#L8!HpVN?ZF;wrb%_a=2y!#2Srqu1C0*TO@ov
z@PPGJm#_e(5|&f36448J0GXO$?u;hNSRb;PYA>`$t}K07o_VRPtQ@$$Z^6-3WHBY#
z6~{Z^Y;=0K*#&Fhu$&y;U!(!m<85<*LRUH5G$uEffz`kI;=~V%oaIVC6{`mj82Up$
zHS`_l`SlNb`2e~Cxa%pmw*cQt#~=}}QwXF`OIWA<^w!A8NEawI$YMDRxk_j8Xh_Sw
zV>s`vBPg&Q<7`r*qHiwFj`c6`Ad?FMupOwDh#zpA>cdiV^|OL}E?XTgj%T$~ENmH%
zG!)SYm!f&RfB*jZ1`D&?Qzi@VLF^EelJat9NJprBcz&mgW>E>((+*M0$BMFRJP}a9
zMMOoZS^UYx!g6(6?oGO1x@x)|>&?u}EPMvRF9siOwgP{#N
z6cR#*GDDq92UIfD+&IUD2ax?+nY(yidfz58>`iL9Qb|uuN%`AUG*e{qsYg>&6NJr6
zY3Xle?V$64$hQlxiqZ}90_{U1YywBXcTj{pxlUvwynk#QU%7x{!DunbQ*ih>Iy!o!
zp6T)9$A{icU}IOTTg4__T?0ym!<8&iYF
zK|GSl%jf=6tC@!D5Mi%Y2eU2Ty`wCuBtF=s0ecgaA7Y7jD}#f&}d5rfdRg%0xRP1X?SDWEn99Ful~{2A6)~3%r;S`sC|b1
zVzQ0-N?l{`vvteje-@HhVjdU4X59Z78Nr8B?(s1DxZKJ4K*+mn)nagPbK~JBKf4ZD
z78Mn>#Adly1MMBK^zz)-JenwWsHydwq}vWCR{tTu#74#=+Q)tqX#v=pq!fUmXM3~J^kCYb>)JQqi^
zXWPvrggg%4pyn{?)LmWZj1_|of+^8AI9N2kYVIVEpqQB~n=C5-Q~fyLg^ZmY8w5C7
z7&j-U&fB*y;oI%)ZLi5{Ia;Xn
zUUuuFNPM|ZG6GqJ(|X|xS7V07R233>LS+kuFEBeigjW$SHlOGZY?$HT;@W`;Wp+7w
zhg8PKOBkWG8Lw~*#?x}i`+W|Ww#FCdSsJ)`?Ck8&em*ZRDY-*T+zEG22P{xJj)#kl
z-LUni{uq$6(a=vKxC&nK?*033*(!@TfF+fOtqbHe_OYuof&VAfLSvasMcO#%bqLr@
ze?z6+SgxwiUszrBtE}Wrm5NtP-2!9BZ9(UGOuxXCgnG-JVB06Ay?Q}`ICFvlol%k9|)VR2z$GQbEzE}P$`T(bc5mY^==
zp=ir&F7ISnK)!oFK0@!fYJGlUdz+Z&cLd|x_ExRYLs=Q$lQVFE>TtI1i#1Ry{cH!$
zTBXv76-tExWbCU~uOcN)x65;T2-NrE?#i$`=U`vyLntPtruG$?sTJJK;dVS(PPMeQ
zo`qCnaI&}DpDyRk=X!!&-UXLRDk|dMe=12vN%`3z&hZnn#;s!9Cj1SxAD`ci2bdVL
zM~}YS+_L-%J!;bjG|b@&N9Oa>!&GqQLF>(Ezo9-_Y#}8tj}7z5w3_SW=(r6T$PzqW
z#)DZcu-Ea~%?Sqw2U!feJ(iXXA>B4k*LwN{1T3V+yCF*``1_EB3JRB)YuBIinT3TW
z!10c!>&3|C2-aO6OkzpN$?1n!bSn8*f#g0rKL^9+ABf78{TrOZ!+zI-yCD?bc$KZwNSusWUUV8Jpt4`2NwY?5%QLaaT2$1U4IF$J
zJFMAUcJXB0*qGY+n-gf_aLYIA-#~9>9L)~T1jGP|vu&Xx+HWE!_izsoS8HLFLWaD~
z=r1FrBrYDo@V=(zIcTsd`MTG;cn@Q^P#x$ulM}EH|G|J(A*aQ!$3{lB<1f#=k(j>G
zd~tcX1y&}K$IpdSZ>qqb^m`&HbU)h8N
zadtHOwK44U1sJlm*M{@jqgegEeY*n)Rypqt8qfxMuw9`s_A!We4Ldq=u#HDLI*fYB
zp#6vhD+Ykrrh$P_s3yCOcv{j0`VzJ$@sg?aU;}~tcLxVYut2{PgLMtRuBk5{?uH`-xX{mn}tu@pG&>H|dgDkxx<;@Q56ME2jfW9@1`pt&537$EyU
z`$~R;nw^5MYYE{D?URI)6BqCk2Jn6>yq}N`I%q*@G|ECgf4&A?ZQVE0mmcGo
zPaD@y8IZ_&btuPRBwsH}wGi3P=K;>-b}Mz(*K@2vVZl$#JfjG*bXgd
z7qsj@0#qlL&!xvpWkY88kkv~0_FtV0M_K1&@>dXm%;uNJM50*b&;<9uH*YqdI)x~!
zs33Pd%$_czEnD7P8NXrg;Cy}W=tS>|$(xMgWo~>Rta&QlynTyF0}X)YvFqK6zcep~
znDe3pJnJ8dkLSjJ2}j#o<1hk2eV<&F*CzXna#@P9Wc;t$N>j
zc<{XK05*M^qwuR&Jlz$$qrm+^tI;?#geNR444L^lP+dE7E&5}$T2(QSgrU4@g>dBn
z$>j;(F#=5ssns?|(4Dfep&_lg5GY*zRiMdJeyRaEppoi2--=Us4kcfwXagohzVBkA
zd~+lH_9ewL#VqMnGbc_iuHNr2uE0lx0C%VNx++h`1C^Z1ZL@Ue<)@oJ0D^FEa7ZVM
z;*{BMc0xi1O=4c7f=Nl;{dZ5#-B`(H=!x5Y=+=>0Xo;ux>41GXHT7w2Z7ncPAD}xA
zx4%o020b7swM$ScOXn_YuGB!SQayJCpo=!Rs1atbCYA^Iugqq-4tflquV0_SDT#;(
zd_ZT7Pvc$Q6lE`8*=(b)rm=N{vhO-~rRJ21i%Zq48XDMedrM+faKglJ+Xo=0=VUbp
ze)RFqOyEA?G-SzkJ6V1Tq2JQkiPWeeWqK~gIR}J0*hY#0o-(CO$!E~Z56a;TL)6l;U9m$oNiHTS`$N?g8
z$GeoR9w=AfkO&_FN-SM)#pWQm)Ngx$aUI%c@)s{&G&D9^f(YXVkzhp?Od0$A`y*R<
zMh=dW7%@3M-~l>d8aZ8#7@!k}J|^Gg*h;@E&fni3BnnU>iK&qNE^r=*;n7hf@dVfm
z76S6@)MM^pNNA1P4AYB?N|Sk0xGe=F!`nn$*rgW0
zxPYbvbODCaw6JvP_!UEf5iv2bSwI>J;qkoAVY`c6SGM})q;#N(`Z<>O76TcRAo@Z~
zB5^2O=bu2i!Uj~qb2#S#Z4;2GVo*48i;E3lRVjVzV1-2i#=Wz(ZS3gqhs3+HR?uYy
zv5FrNpqhq6)DH@q_Z&-(SJIU)Y}jttNVz6vG5vXN_X{3C6oge(Rl{Rra+OZD6yg!jRaHklO_J{I92}&;O^}-n<^{Dh@jaZQ
zx%!BTO2q7I#S>30*Zt74GkDR)1*988&7x8e=oVO}=CQG8(wCpG9lCf$7t`(tU@v%L
z@kzVK14IDgG#M%~P~xA8C!PGDdz)EX%Q8pG6cBaLYf7iC!237P&CR8a&BeM^!!J$_
z*8RYFgMxyBrcW`y?!7)<8VutBYWLNXc12H*h9d`?~>@vu}pA3&QnnvVRRr$E|4Qy4$GxwF5Y42=Y^MBiUEcLu@P
zruV>{GK1OF?RXw<5?ERUsA=#Fx#G65oH&?KR^$E}IK;GG?CtHn3lS(=YK26n+lPk+
z0PX-cWEzG1lEPSMc7_cN%-$6w9zHuef{wQ1&235*Q`FM)uR!iWkNFy|;Dlx|vUpkJ
zA**qS)xp{bY7Q{__o@LPWH0TX+^eputHXYd5>o4lk=Zuoyvb{O43p454=<|7Lg0@|
z3GJS5d3kx+1oR~R>t%N983F?$RpxEevsnIj(0
zr=fw1i<<&xr^sPt)8Q;y8P8@LoCboa`5IN7ot+(3ot~9q22F!%LJ{Oac$R9m;sYk#
z&-p+R%>yr_KL(&5N0s`SIS*%>?HU`
zuRVfsB_FU%=5cvh8MQ|*z;3~up=Tf|$lGwmqEa3l9QcCF=TrgIx2@7*)4792m5WNY
zFi>e7gY&$LgFiJgk@tbVafPX`6dD0?TNPj*aF8z*!;$xU7L4X^!#dfgToRGj!LL@B
zKy^bsg(fbuCLc%?E!s_J6>wntph?3(+gk@<+s_}YkIhd`0)|s)V{tvPv2}!=X22>n
zHT9WOr6wFEODS824j{#x8+h=0mHglf3xxxIX>HFGGLqu_h>3|?k9QX@aTe1m=E&(C
zCstRXGOQTrfHo5f+5Jti)-S`r6V8as{kdQ%YB+i1t7&mDvI?sNnh$bfHHx5Ku}aAC
zw>LEj^(@=ypFe`!H4lScDWb1BN>Hh8@3-$YF4RA71)>~VwF&yaurz+%mFQa!nC7Ym
zc$t~y($!1zQ7O4%C4pd(0WzWEL^SMz`X_x@T-?tC+U%^X5HRQ#yjA)kfL8F<28gVA
z*v7{0l$V*1yF}I1V-kkKE2NZ$w|^?CblCc?LP995uAwnt
zGgfF~ue4qQxw721=Z4=laCgZeZP&CxvNZ$(1A*j(Np9#kQGS65bdu8rnh**r9JE6+
zZ!Ru8kd7Dq*(VX&K*g$;ysM=-2g7(@hQc#o-F!ZO{_c8uu%dt@{vZ$C`l(ed37-Ms
zHUVgR!7$s&3da~;XWZYhow#t+3APgWDJY&j6QdQrID^*e74Ut)qa&sP+k!B09ALJ)
z5N(k#V9ZG1kK~mqw(2n9y}gm=yg++J!192(%=qE)$q588Xoc<3>{uQ!y|5isdV706
zLnjQ(gM6mqEf1uq>j4VV*2U>&rlYr)*N>sqV`$QEgotI*p2%~)n?^WQ65HF`LvIAm
zDsqrpKws&FgLK@3$2y9TDuBs`YNEF_U5hXs{pAC);uZ*oc|c{^j=^~fA>P~2^xBJa
z1BZz$wXlP2N1(AhhJ`@h9awyiW7oX<6ylG_$tH86X83nwm1OM8|+&P%AQ}%GH83K?+#SUrB@$
zdk|jyU8LOH(XjP`!LwI&vhs|z3*G_s-O-lLzWWsSW<@;=30yLXkfQPZh2XWk6DSFJ
zo#bPSRG|hz_N#~Mhd~pC4vc&Si}!Pxu#iv&b2I$rB^)yXChiOvUY^sn0zqPoK*&~M
zP|HzE578H5y)fSGE=VBD7%&|$pys~5V4!<54h!f2=0e8Vopjzrfo`-BntseVIrVtD
zYS6FYRG=7q0C*>Kr0}Avt2?e>x&!A`v>EUb;8j2YG7iN1v~dHo(6OQC5h?Fel9FTo2fMf*{N#txo-4X+84=K4Ld=nyU>m;hq57YpzdR15RLlIQi
zTpok7lu&f$kw>gRq6g9Gj+WNcfLbEq-9zzi8uJosyj+X>N72O32Alyh3YZSW9clP$Mnn-hi?J?|v4bF8W
z{(wiuxgMz4(m4YfbDZ^OM^YuQTId&?7H|*|saJnphr^5|EWw(JVP^5nzg4Wls3M?q(ctJ?)$ntR#!Y${_(~86m2xDp4dl1T`jEr}G
zpKJ9)b_NL7@2CHbTac+5))Z>nb2w}|&^1u;KGg>V&drKlK%ZfdFVfp2#-dIX5x^NI
z0I=uT@0T-f<_A**G}WxyAIH7Newj}YKK?!X@?&zwwdmMLY(i=6ZG!O3rkpBj)8
zfWt&;oypXx$u0b5OgYd9BX{Ya)m1UT6u>_kyQe`1ev_C79}5c$VA)4N@DOgvWlI`i#78f+#pB#g
zjNm?3=QLk13SOkxQm~tkOS@xXVm_gv$$GAO9Iv_j=g$kkgY9AdB>Zte99Tnk0#i&n
z9UMiSfwHEQczPy&NbTt0Fb!3OuFIKu8_4p39s&dqwEKq@Pl(7}&s>3zH5|ymf%XQt
z&d-p}qJX!#0(mfln~!M_^m{1CaPn1V>E!G%WP-LY^ky*Zk}b$S1t7e+o$o8z##Q_K
z-vWJC2l%7Ikg3X6=bKDSOzndcRjz91mAM#bpeV?k^KLSBy4z;30HyMNJdmE7n;Q-U
zrpqLLDx0*y%37`fo4brBSUMaEI5#M;PJ`9yH7j1Lsz#llXoQ?CD$9t+V}TjhOOueX
zu({qX;09?!W$PF{z;dv$;f}Ph>}$s$;BN=~=kEfT?QDi0esGw)2=_6fxO=u+U6@Z$yu5HD4F_EcqhJ=ee))G+AGO@S3(|No7)ESO2s_>VU
z43j|xsvQ2t&W1cy4c_o>CO*n}TsL%}fU{JXAK8zNN%5awogxg-MNkGIygA@BFW3W!z-R=gvw?9tdNUP~at1CbJzSbOqK-CxFP6<<
zpN$1y!Wtl*`lrmy%#5b0Do1WLI7)>j^zvEGd?Yxl1nqvj%oej!(}ImaC1_{*tOfwT^tUhc40rJGFe2jLQrNHbh3cw7
z_Py6k0W>a%q*o$3aQj#H^|9iqu;#fg3<;LSL`7vc3!sF=k3?sIX6@kMfGD+i+!uTf
zU9j`P+7IX~&{DK<^&u&(O_YZNEJE6tm;pxaXS}E4qW@x)kqSr2fpmct@uO8MN_*J`
zuz+rl>{&L-74$QH=R+dg#zOXYXP7h+HIIm7zBSyiyZaCq8|SSLxt)K{Fec>+tDS#S
zn9`F$bNxlIAibAhY*9G3FTKkmSZ|a|f(V7t@B#C-n;425q*Z{z+68MBS3Q^*-0lTepn*)9xbEbT5I-
zI%uWHQ=(A)Dht=E>F(dZ-xNS31|-z`{slfGdP;E#iO|(k&;*d|DKz}a)h3eQ&3xbQ
z{2Ph$u{WMPc>=0nkyZQTJ~FPXUXDI}ga8juJN&vH#*7>Y@cj+Iu9TE;fD!2@&!rMb
z>EH$dwqLd55wo*Vj2d8xlrwj5k&7+>0C^9#1j_S;-jn$k##*(I9PR;B0B@`!3VY_6
zwEkpMRdaLmeSG}ava!!OZ<)zgRQULs5h&(zNvv52+V%(}eb9UmKU+#?tK^>~%EG(C(y8=kFqTzJs
zqI$v5P+Uouc+uC&1=HA1V8*63*G&1{s1Gm|^Oy1a_wEIeP6C-05@@%#x5omF6akpx
z^&)=-y7dahLy2z94S;e^ohZi1!C~lsuM7;EHxn7u}q
z*K*6kuf_`uqynl%V2c(I_OyFj@k8SRV*YRHvr;
z`YEhjJ#DK%XM_vEadP;}AV0pm2`!3&NoIGe7O?vfVILjZn=VjEIah
zLP$W97Mcs%iav)X03ijT)(`!GbbSG=-$A?YJe=anlzD!*IhFGf=2PwZW?$;p!8Em#
zlM~e;d&xwPlxqF<_V!;whXfq&rk$+=TFHXNndX~V^jbq`giEfn0GR1dgW&8#t9id$byZDsTYV2WB6cDHJvoP55N|g
zs$1yk=i9LP!bvreFchFBP)&`!y+J^KA_Zm;LjjL;-K5s=9szg&qEA#|-Qdd;$W{
z>~X-T7Gfs+#@gB%H0mz^S;<91YP!3>Jr?xsP7uTd`5!A;@kud_ddW9%A;=UOD?m<{
zPM3%7q8I##zy>A_C|T8bpnzSC_N5#eEHqhJFE#EInu%f
zuh`YCR5LOX2|G@@$W%pKTpTc{0hsq9qN7b3VJL~9LWR(2M8O&dhJ?IImrce8%@3r{
z_C~G6eZU)m&|dcac!>c-j7++`vZ^XXO+5h7{Ix4=;7|p=>^*>%g@pyA1!6Tj(Z}aH
zAR*w^Xr=f}H$c!tO1^-tfvX^AW5b1fg49OANDK~2r&S{d4H&E3&|2LFT}i2
zpx^d^^yG_6>l+w&7fhru7vPg8xPeD^Gs*7m@p|zTlJf;_2LHhW8JGl<_PPG~ThCA2hqA9B3=PO<(ra#@d^X`u}WKttfy%u~cZyI6zhl+20b
zSdTyw-r*GdM_`#zs1%Je2WJ-2g!h_goZPiI)4maTx9si~(UYodBV^E4yZ$}?Brr!r
z_4m-xD-=@yATI|I0Oit|1IMv9|Iu=`(KX4X*a7cn2Oje4%3k?^p8h?VFNqoI%z6;jQRqGy%ssVo@bC?sQE%gcuho{9ZJHh@|-
zoM*Z!8z0=)4^ItM<a!h8u|O3Z9et
zi3uXGWr2<6dxPW29u>g71+YV?2K)Jm#ii^^RCbo8v^lzh84Z98WWg1fd)UWd{G@w%
z1VI39mSo5@z|+P;n1n;H@50BGRNd{sl!x@BLPB~1X<)1blDPq}p{hDS`XOC}VCeXC
z{XRuERBzZ?NM!+9tKOiypL01ER?+i0Z5xpV?3+VtyfY_ZA
zV}Vl&X9MnffRoFRC>m13m3M&ZAzTE%67vF=)|p#D=U5Q4lfh~Bqyv-yWG2tc%PT#;
zU%{kMWzLJ?ps|o&mv7Fz}*{r;Zezcgw0Cs
zk7Va11QW@Q%f;_47_h&_+J
zWZ0lV#TqrE_PdQ4kh&$NcSCkXc*%A5+)h`Xl!SXMU2Je|u)UjH#oSMwmoGC|Q&)EE
z`kIE&AFFrU_8i?iA*9~FJflFv&v{daG<7PXS!s~B%saIcX)PCbE1%|fHi@72y|B?*HsRu%BP8OTU0{FlY2v-=%8O--J*5|~
z+GDJ+(cHV}b?XnlM!r5iJCG>D0d<+FqtlT?KKJs6h4beJ?kGx~(&Eap6~HkF0dm^a
z*GyerH}aB22%wu=JK<$2N_9!=8$t{CGIsrp=vwwwDdXudCJj_qOiv3SH-+P-vGy0_
z%J3)SzwV3S9iODlQc1oxrtA-D?s*gm?{=H^34Duxr(jt@Wtc~50ApU-)kW7#AH{cs
zb6+93Z5a3Y=g%d2da{J*q^U1Kg^fZ4eo%4m-Wr+em*L&?3$JLES747UZbFtB{72?x
zUfz>Ok6hr%TD_eyPDVxspK>u8f1NklS4xRp&8(r!soBP6Vp{5_@eZzEfbWo28uc9mC
zG%QD+8>HGzPt|EbfoJX4V%<(=FOTF0fmuBpt5`ncl#@uYL5Q+lsi
z<>YcrdYt{H=b`$&?>O6-w7BGcw(T>%%~l!XmXl2x0A240V-ab)o4@!=t=K3wP6fQ@PE^xO<
zhDnex{yZD|GwSJ>43$HSLW0YZGIcTP%x(-hvi;zODFUa)phmXYsZcqyF`IjNM&MJ1YWly(=$0a
zIcI%Zdirr{ZzV$4jfRKM^mX`NKX}QC$cYzMemytw=A%`iDYeSY-Qt#Hm~N;({pEE1
z+$dddO`9v;`%?GW7vKNhDXv4y)HQEkr}ufQVY;K_zQor1Pw|(`;C^@Z9hqC&cW3=)
zi?dDlPP}VvwJi6tvSiNa(hc#-2mc
zC}dt713Q6?OF#R6#@`^6VKgtx;=ee)Tpq8yzouDrF;(-m;b(ZqE9;X7V8yb`i^)XY
zlFAS8_Kg#oHqF-9gRt^UrR|vmZNsZT1b$I=nw8f`S0q$Hwkn7oL~w=t#8>
z8$WqIW&@P6_W0(!Mi}U9sXUi~yqeUn`LXNnsYko)H$s^qoG0<_M&|H!b#UNb$>8-z
z=LSF1n@THP+$6L&OcUh4-Cpv`Ph-nmI4FTa!CDKcU`k5Lq5$dh)AMpJhN~uoEcmAR
z+d3ecf_imFk+!Rh!kHq4(N$+5|2a!gsvksq&6zd_;(=!K*;tRTEd`6tC0gzg%5(Ux
zaWJPCM>PVig54cKF!`YzellQ
zuEu3QgOwc}Pv!S0G03<$-H#4u2d&fAty_iU7FcfsLKGQ!HHp)NZTXu=mz0#u!W{92
zsn|7j&FtB20807e=R?1OqKF=a;Da4B+GExwM)(g73kwr&B7Tan(1{%?VrHRYbag$@
zYbslvqfxYY9(ZucM8~z+8nb523RQUU?P4CLx33mACa%9@W8vE8_4iSn|H7+@YVksG
znJYTvlCrWVqEede%7W;DnR^J9CFmLiqQ-Qp5F%4N^hdIXC*n%!0a*ZCdU;t}p^^Cg
z{jE^Yh=f-T5F|%5#)oOlHpdc5d3!=Z7~@Fo7n$VFps
zcnqo$zZt$m*|DL%zN+|9C#Zm3qa@|N6JHLE_4STlp#$1vyUFrM;mPHnTJeR-Uq`0{
zBi~3>)U=-uGUhs>X*}ZH*>mP>jL1*kUo(dGJLB(6g}!|^<(HLj(dTO@+?tgI|#*jGvP6f3KAzK-I7-`!|{pT#SD
zp?CI9GG48Ky^SXo&XFM;f^^TDGp)!j1X2D-%P1NgND$?Q>G;;bs&&x2wC~sv22l9m
zKi!Ir-4%m5>4G;9ad%nS6q+G+6=T#=UW$Z1>ZDF+&c5_I%1lv3MK&V!apPwl@Z)U=
zQz*!TySjfhr5^=H+Y#O)9t>Z-d_+(1JNvrQbKs(zxxiqV`Zv96oN*8L
z(Y0;wCJ1Z;$t={6DQvoZb6puKmxUv@ugKP5Tz8ULyZ>ila;(OTOnU#y>gx3PzwyPKm&c$Y`77`CzW-WbW{
zSC-Ky9DoZDg?9#Q(s@G(7VVp$HB;r}nkOpjFqa_5d2MNac0&DTx|JI@Yk2jgEu1An
zVF=MwTCTZq&U)Q|s$r!^?drE5@J)U7+B@#(rrQJRk~JM;_2(Nzy_)&>=1Yr-rSWwt
zD*20zJL~rt|
zy85Nv?Tjipnj^1KtM#Mis@^U0J$$$|<=qS)XIdPQidW5^bYX$su#^lQG^m!QeB7t*
zbDE!yQl*)cM*e%oY|fD}cH`DoUp_xpO%2hp@~n$AV5ixX-Fx+#3$MTX-HSE9vnEP)
z?$l{k@C$hCbVxxc=|_(r3mfFV-(R#gY#71;lL~rAml%g}Pgst+_KY7pOZ2$UYMHsw)k
zpe+)XuYorT^U)Tj@wL^*cY0O2=H=$`9Er$hD?dH!{Jq4Bzo2L`70
zAxbLcrI%*AW)BYO>5%R{GF-;$09Iy!3-TcWRde=D^-enf$WOQt1fKCkUv=#rXOPF3
zn3(hKRpwS!T?0dAj2d-9bbP{i&Rc^Rf#MXJaJ&392JCvjyVN~Eq9QSkTjb2%^80W6
z82Q!H#{pYJVmEzw2~_
z%%1tz-(u*@6?H2V{J;PB`r+m+`u)RK8;S&`COV&y^S=pH&QCDx=m=mh=
zq4UU*Be6cG;!Y8+ePIAFN>~!r;b3U!P(TAi194)9v-E`=fn!o&3rs}ki|!s2li{RE
zJ|a*ONEa|neS8O7zqT5EW*-~o66@H~XVbV%Q&xO``ohuU^V`4U%mS@ax~_FFzMmw$
z=>3dtMrKBrr!04Id={Twl+Hjb$QE%Z@PagHOJudVn7hq
zQDIdpDmwjjuUg!7_AZ{KIzFfP?~TtJYr?)6;Ih@nZBbegKjAmB(3g>q1u!T8Hqrk8
z69bpbrURJ-#eA{L3i2?EGn)`DrL`+SZ5X0a!D|-denSRoWOE9)IQ$q(k&iwTL{hJ5
zcc$gx^-bjr6!;D^<|W|C@A)?tZfWx!IzcSAv%vBec_+VDMGva67~e<4AYhru0_b?o
zJV}F%t)ldn7|+<%BhqW@>sv(*cFNDsFZcRq27Ec8Kw}?blw4FWY7b7Fo3|o^RFC;#
z8e=aR*EhYd_+1~}dt%G3W#QT%@0KSNI+#(o7#in>>kgF&HJ3lAi?>whw`KpO1sJ(9
zsWkVWXr?2FHXyK^Qgc%J)?U?;O$DIDzklXQj_oaMu5LJb;^afloi`6xg(>=HU8#>N
z_?D`e0tJYQ*dxnj#{3_N1?W{1d_>F#F!Z{@h6}TLSG1WvefpmDZ!;qYhew#Mb!o|M
z=$hnhUp;lT)sR7h4$-}-(JZYru*HFMeuo?b;hLHc?@M+D~r%zHgt80ImiNUIHLES*e8Pkm$
zUHtrvZvM&&n4wEE2f39I?A1Z(;WxlHa=)iMY4DZRPV~@xn0Pg&?nT%0JtT2Enj7sv
zR9PA66(RO|7-v+mh&mA)6jq(hkFmB^=a9`P`RB)=ku<+qqoL$%t9an4U6FrPu%xBN
zNWS;aIlI)03lq+`_6hA5x$3>ENx%xbk>9f2`T3Q6M4G3*Y0Z
zy&dGVk4)TvAc|_ugVSz*I!g0X;*KSCFDe{54IcAVTC2$~@s-VqLfaE>r#73~R$sQ-
zJyq4Sm2oe=d8O9H%8EnZeY?$%)1`fy|8uSOk*=9;7kg)onX&KJ_VFX@HHHl>{qeKq
zQ}&i$ug(+}mQPS0{NIngHr6w*IWsP`;@K9f@S_%cLL@$&*LG<dyiPPfB&*8+l(@1)D7vX|8L;b?4R|gwn%neD$4xZ^7r;V=j{sF58jnU
z=9mt7^=?U;EM0xYpYFA7`KgD$cWF%C|5vuY^xUdx2Xsz{3`sj(fB3PBb7k#Etuc!)
z=-gi5sa#Ugwa&(=P@``5rA}f!9$8tV)T`C*XC7gkRre~o&C8R$V<$5v;MTHx*25hN
z?>Sj+8)m-f$(HD*%mq@F8f*R4UB2!5T>0x|IC^ih+E*(hE-%+AeBUqXW%z=E+n&Yr
zQ$>DOwpTZGT{m^Fj?%xZ#*rGQVWodMI;qN;O;(uJHnzUt$_IyIpC8vx@{aNt7T4@t
zw|xB5b6xL~3J|0pseXahQ<0`vWRZY>wiVI&5z9yXgO{;VJQQcZ2`9g@v1~S~a&0Mrt*qYU4O+}JiL8(*SD|BmX|y>vHc?JGJH{BPTZzx)9ub|Io@mI
zofs-9EVb9OCr{jjbP1+lwnyI+qNg?wtd@K)VMmWPUhM8?GCSEJ};D#75t?@aeR*TP-z7|sB<=*ve_Gz
z8z#X5%Rb|#u09hmy<|i9l1W2{nHq+7d3?OLnej%Kq$JIRn^t9wsV*TbrlcUHwo+2^
z(rqdiXtyeRa&mBU{hvj;nslwgkjv05PlORX+a>50VKzfBZ?AgwUbb`!eaBTMvuPi~
zL=k!D_OW!@8hiU`LU~U=40Co?v_l}00*N;@y(DMFkwG4S6!uKi7Bny`zu<~waMAI9
z^JW!Tz+(<#(PuF$1@VT!7@99U^+>Y3UtuzPQ}KYUpUPj9da0DT&ka`kojuqqCa-Tp
z&brlO&M8(!cZj%sd$PdOZPqcDyn%`s5I%q6*r1@0hYyV{jGdnOPYw)sD{ve>ZvTFn
zPCZi}=A2>r4-5}?)0!!9acEYEdY5oJVO!tO)~(d9yv)crfj(KqF=6XlIsBtkhMAmd
zNa!-t*Zv!iIH)yV69|pqS8g$UKS3A$i>VJ4Ar*8S!WY1Im|5Pbj_E9p<{PDa#}r_7
z+uvWhp-Sj6VtIRsnVDJGqVw87C2A8VW;gU-RAw+@4Cq|8nMcq$`|h#s&*Uw~?9!Au
zp*cb;^sL$SHM$OeVloo?q#wF+b(_Em+aHE{hq>4X*|9hTqgal$_*HBaoZI2{YI6_qhx}6t3DrT(D4B9r)`+K
zyU*QQr_P+IO7@%^svWDO8q*vv?jHmPp4I>08J$%zzLO
zFz^jfu_DnB-<55>
z4*_pXt^`!WixFIw){dVbvSGYR_7G}A3xk^YgxrjwLtR)mRiN7X71Dyn(#Fw0tEn0Tb5|M6Ho5f!sHn|RHPq=!8g^&r@-KS
zqeYrwem6Q}$ypz>Cu%x6+Swm2po)%u_OM_;r{29eRvA}xCT-s?FE#eOf5zKw!k7yu
zg-qiJOzHDLE!sPEXnm~sPeI|~D1)zGWM{%i>Vl;cXg35q7X29bXD1;mWfZ8x_v!ry
z_{0a^;p6Aef77IA(t49h7uMUTzRt=}(nfxqxs1RHAe|wOWT0tJ6p0{};zSLdOkA0NdHST{|}!
zyZdrObwt7wPs;L!FC&IOen+1Ur#oF=JI~>1!mvONq%h3Ih9=0ggi7%3m`$dp+UM0bN|e{$5y(R8
zk0`N)dXG>Aq!S%yqLF6B3RiDh)zrN|p~#0#2^|~PI@roYOFhut{q=}Ov`Z&`d{_rJRE8?_oNf1Vk6vPWAhN&1WM7L3yk!F4!xez)Y#}%
zEif!5OA*2XoB5Eg9n!}X2Ri9O%a|L{w0vPYWB|RNlOyNE_?5!|y(sTLr)&DI+9FI);>Q$?RzMh0F);h@ih#QU;@Jh<7uZQHU)az|Ea)$3E
zrFVU%oSfo%D|xDk>81OT@v8OpFG7aI4pvI);i0%<$j@tv!_E#>yZP1g&%W_><774`
zy^`FyeOtv!k`w0N(eW*Ak%2h)I(6!_arO&9;*AjojTDFi+Y^T2W4~g64t-a3h
zWI&L2&lR|IngMw|iM@&&w`soQSYv{5SQPXdP{TnksAB3G%unOc=8Gfk38s>)VKhrG
zefd{IQ*(fmz)?`Y!NK3!S<)89B8;~f+hjDeT>3RKF`1U^#b4i@|MvB3(T^XCSFMtl
zP=fqtI9{Z1RI^PF9Xb@hef}NDk^8g@Y-R~B0PmG%eSOPa*!*d>RiBQkt8ZKK??$QB
zquidL5($`C>?ZuImwKcSS95#!w-v6bD|xbrR&_Kdj2?B5^+!Q><2ov&1lvB%)9}60
z&VIoX_rZV7+hvSiboIJQ)F%T^x!`NH%7=;rwo2U_AV1Yc>#)|@Zz?-9oNi~PrXFG*
z>`vq-YCXB;VTuE?zel@m^d2$9V*IrIzmL>@G|rg1XL+&ayhFEXU%N1V}rj)*aL&5yNh+m@5=MYPdGX6`3KnS^;BFqL^=rb57=Oh#i==xtf1
z^riv>6^QfemXG&a(<)p`NN_BjwsCveJPj4!jN}jDc5_pIZ1?G@{_y?#(;*MtKIa;Cr@dhJYf)jY<
z&xUia(0QqVb6!5zt~K{V`Q&8u3=~})F=9^RS49HQWD51_SNTKV*L5^AnC3G2f?Z|Bf{sRKX9q7(n((<-{l2!-
z7TSB2OzHT2)4+9e^3|@g+#hCUb_TnqR0;nQwF4+*DW4F)RAJJNfkOR(`-FWpVBWkZ
z6Sse^y>6+c^*u^o+FloeGr6s$vEeD<#pcdKEbvEWftmlZ(Jk&L#Ej?5=gF+cilygf%a?7&M`}gfLH8Hu5IscaJuS>75(?%D8
zFyBWQ#L6zM$yfp@=0lbd4p;i{gUr054+rG;9@gm}Oh7u5(hT<_15*>o@ku$irri>S
zVB@LBY*rEY#xo(iD;44%@WB`&U!#i%-4T#7meg=w$~61hQ({^o8p85kMz-qep3if2
zTOQ`b4651g@^Mw7rlh4{vXj&Vp97!fzxVW$@6&tO9M3~@vxmG(4O^$#Q&M=m&rF7d
zx$gZIkaY+oMQ?Q-f=}4SYHG@WHYGx)#7-9RK+GWzNmp_|c<_GTZByeWwtd{C%koYg
z2fcpp|0Fx0yTm>^?hRWYh?rl<_0
z+2EV&L}trD`XRxWT7D5)NQAaXl1Ga6Q|Nct?~`9k!dj+Af6NQlRb)K0j&57I?NzfB
zDw7MeE$S?FF_|(zIbn=dF9gN~6@H1WDDCX%s2ks@x^XEq6whK4!
zET-9>WLLGpHC2Bd(4EL-U42oNpY^KG!%)U*ctT&y3L9=OxnX#c+%AT1gSn+Ww2vgl
z(5+tUcXiW)!?6%f3*fe1Am1);Zl1PJinCLSEj-#IpF|
zyqinj-@V;^;WwQjU+My{r>%Odqq{Ls_kmTy&R;D_&s9F~_ZmEI_hOEDz;O7M7Y#4k
z{9e1!Fkbn_-XDW7KAEPb9?{$*vwQ2P73=GH^1Y)_YwFS5+7+KH8Yz-er(&ii?NPO
z&EEU>H>_EoAiCVyT7QPEwJq2ftKNQ*yUfB%R^y}(_^F(a863XtQ#%!jdC9D)Upsg1
z{143TDE)Hfho1n^)+an2X3GE5T%De(8K1br*Z*&%Z&d&C7Kb;Qb)z*7YCO-Icj1h-
z#>%_?gUhbhn9Bb9o0XiOcJlV$R_BnXpg&$kUjB~zbF
z_XE;uGacpjj4O{03Fgxn=)948o|td6+EJil()74u)C2AJ>Y9CgB4k}ERg^w5P2HWI
z3U%i4sIxV_4}&hcKY#vw>o_^WKs4+1s#EWLnkO~)#=gCCe`Rmemt<(e^lGTu(a8W&^3-Gg^K)m|@B>8@
zQ`OWC(q}*{9foU(jFw9&o{5!i3)&w1PsIKIXFYF~eUIm-PMu0D2>EB0;Ujp`?o`?A
zud7lxf|#=OiMfmIP(-9+JgU2JV(-PqCE7i&dx;%PhNm+ADX-cqAy?pX0Qxqwl5CtsQ4HL
z!@lF-*M)pH>YcEzAbm4@A|hB3z^UflTq^5+4kN|i-&NB;3KW8FK_MnI?>x>qGSQ#C
zYU0T4&fA%yRus
z1V#b}Lmw`w{Td$Nb(KP1_RVAPY;$2D18sBSA32ZtT7+)lBL9dx!79Fh
zH&*oVW95={hpn~*xG1;JDeKD+`G9890vl`#WyWFnkHcQE2PDGyp@xG=0-Aj&Dk^G4
zWBj6vJ=)CKPVA}}x=8^w*|h20#>6Y@G!cM2r*#tF6;w*laGYjS#HpzX9B0m_NNcRg
zOm7gk(lU%7)6!W*n@sv;HxNqUVJBnfbNSzwG!4UNz441_m^F2s_NSZvKcxWQ9^(dl@dT6IfRxA|h5AHuG5j6J`??jZL&}@sK9PuRKjNUV>
zX*hZ(E`>3JekN<~wA|U3yY0R_SAyG*Y;*?%l;mDEIL^Q?5|NB8Ez`iLWCWuxyk*zz
zdGf1BHwNT=!qm)a?M8k^MlvXrXh>-bkszqkk@0l8>V5P$Cwn`RMv{~mVN4e}wxA6i
zY0U(7iy}o*2`)`{qXkDL%y4vSL&b>6^v~6Hgq1UaYG@d|4)|bg0Yn2}7l?R5s4eOA
zd(O`s&$M@rGeb~-BJ2mEw&3W2ER`P;Ugo9s$qv{Qg^8P^Jv!q!83uPNl7e|V;vHhq
z7}3zHTeth@2GJFZG;g8znnv*X$r__0?~S)_cfWVAOM+#3oP#x_O8fK4t#shV4-5ni_LHD>>del#J3a_y7+Ve44Ew0I>MB0
zgLJU-QUMfD$b+RRjNdFvB>Ez>t~CE@yUUl8r|alx=@7$p$4?j4%N?nPBO2yKkWa}*;2hnxVjXS6iVLQqKF0*%;Ef+yf&EU=zc)Gj(we4{Foz3lbiMGEE
z@t}mw`(XeH%tBX!UW2h$1o8l5-QHU<0EMO!cNht6EnYHldxD!=JEHED1`c#W>2N7F
zHulQMyQQe~fKXDNJaJ=b2*bnng2c#C$-H)9jz$V4Tv6ztgj$?~m((lIuMVGIX_k5Y
zMSA-jB{N?s%qB{NGm@b8G1x;>WE`@R)>xzs=+^C^i;Fbmi~!%okVEkpFvDRH6@k%K
zfB&F@JN_{N*gr(mMhDIOnpstM@ztH2=5;tH@v!jS+j#5DLC4IoBl0l?bGGe!{{F%1
ztp`}%m@;cszEKO=NYQl`ExHR9Xhp9W`HRQu&R;9RdTI>GLB&-`zd$4@0IARrAUkJx
zEkcH+v+N}ygjh4-cM<`(qVhuALv=+F%;0Nf*
zSS?^wCleA9&S;{+6*04Hk;B2kCGGYmIzQcZ=Y4B-w$S<_d(;NIkzF%6Dn9C!WphPKvj+#!$jovQ=AJSb
z)`D{j^$RH^;?{<=UCKWSsoR(}w5o?bT`>2*v;dzoRn{jZURON-A;IP8;?-+s;7z!s
z7o@GT3SdDfDUj))8B{GB%~$iPk_z;0M~7kps?X8P?~|5r;YB35fs~EqF$sst?_H6y
zpM3GgjT`DCR`o#{iNyi_tBBKk_PlwUB1%z8c~4v`?MzUpa3@a4N-z76USt
z@t;d4UwB*L^~z|96VNYLwjc8LHgM0|2!ag88zMrsP@VIL^uY=jy*#ycRsC-TjD%q4
z>|O6l_V5Vl7l_pc{`q0H!m3|kS;^4Z+tbSj>_9$LI<1{BgyJDQuYWw2dpG0baayUQ
zqfYUr^)ubHDcsq6)ao-VJCTITa=mCVU|@JD|L!pY{>RW1TFoEl?@=0
zEJ3`-W$yh{s=4XhU-mjisb~Cq7?kAKiJ(Q4AhPkS#4#Qe6oeE_FLvE@8Z~D;Ih2i1
zYPoR4@tul`ih_1I?onE|az&m8@?Z|rR>olWob6q8b+bopskz<>@s=>zBN+zV^Hg%+
zl7*XQGkdgAoc%7?|MJev%uFG1BW%pA>hYL-SFt*ZRgJ)?x2kjw>SC|?p|x;EK%_;u%4Z`CZo3?Y>%C>St&czC~9x3cU2
zRJvYD?^9D-4IUhvx9%qoP*5H(8>|fj63)e9*Dwp+cqpdeF;8?4Ue12~`q3Ud-BDN%
z^ey@FE(A
ziWiz)pahhF-jib0>toNKSCEKKio%iOyYhJ_$I1bqEnOY!e%P=0)`Sh3Fr%SZjBpZt
z^690dzJJyq!t0*j+rRg7c0g>;z38Vz)+@&